/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 2002,2006 Oracle. All rights reserved.
 *
 * $Id: LogManager.java,v 1.162 2006/11/27 23:15:03 mark Exp $
 */

package com.sleepycat.je.log;

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.BufferOverflowException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.List;
import java.util.zip.Checksum;

import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.EnvironmentStats;
import com.sleepycat.je.RunRecoveryException;
import com.sleepycat.je.StatsConfig;
import com.sleepycat.je.cleaner.TrackedFileSummary;
import com.sleepycat.je.cleaner.UtilizationTracker;
import com.sleepycat.je.config.EnvironmentParams;
import com.sleepycat.je.dbi.DbConfigManager;
import com.sleepycat.je.dbi.EnvironmentImpl;
import com.sleepycat.je.dbi.Operation;
import com.sleepycat.je.latch.Latch;
import com.sleepycat.je.latch.LatchSupport;
import com.sleepycat.je.log.entry.LogEntry;
import com.sleepycat.je.utilint.Adler32;
import com.sleepycat.je.utilint.DbLsn;
import com.sleepycat.je.utilint.TestHook;
import com.sleepycat.je.utilint.Tracer;

/**
 * The LogManager supports reading and writing to the JE log.
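 *
 * <p>Illustrative usage sketch (added for orientation, not part of the
 * original file; it assumes the enclosing EnvironmentImpl exposes a
 * getLogManager() accessor and that loggableItem/loggableItem2 are
 * placeholder LoggableObject instances):
 * <pre>
 *     LogManager logManager = envImpl.getLogManager();
 *     long lsn = logManager.log(loggableItem);                    // buffered write
 *     long lsn2 = logManager.logForceFlush(loggableItem2, true);  // write and fsync
 *     LogEntry entry = logManager.getLogEntry(lsn);               // read it back
 * </pre>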
 */
abstract public class LogManager {

    // no-op loggable object
    private static final String DEBUG_NAME = LogManager.class.getName();

    /*
     * Log entry header field sizes
     */
    public static final int HEADER_BYTES = 14; // size of entry header
    static final int CHECKSUM_BYTES = 4; // size of checksum field
    static final int PREV_BYTES = 4; // size of previous field
    static final int HEADER_CONTENT_BYTES =
        HEADER_BYTES - CHECKSUM_BYTES;
    static final int HEADER_CHECKSUM_OFFSET = 0;
    static final int HEADER_ENTRY_TYPE_OFFSET = 4;
    static final int HEADER_VERSION_OFFSET = 5;
    static final int HEADER_PREV_OFFSET = 6;
    public static final int HEADER_SIZE_OFFSET = 10;
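
    /*
     * Descriptive note (added for clarity): the 14-byte entry header laid
     * down by writeHeader() and addPrevOffsetAndChecksum(), and re-read by
     * getLogEntryFromLogSource(), is:
     *
     *   bytes  0-3   Adler32 checksum over the rest of the entry
     *   byte   4     log entry type
     *   byte   5     version (the provisional bit may be set)
     *   bytes  6-9   file offset of the previous entry
     *   bytes 10-13  size of the item that follows the header
     */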

    protected LogBufferPool logBufferPool; // log buffers
    protected Latch logWriteLatch; // synchronizes log writes
    private boolean doChecksumOnRead; // if true, do checksum on read
    private FileManager fileManager; // access to files
    protected EnvironmentImpl envImpl;
    private boolean readOnly;
    private int readBufferSize; // how many bytes to read when faulting in

    /* The last LSN in the log during recovery. */
    private long lastLsnAtRecovery = DbLsn.NULL_LSN;

    /* Stats */

    /*
     * Number of times we have to repeat a read when we fault in an object
     * because the initial read was too small.
     */
    private int nRepeatFaultReads;

    /*
     * Number of times we have to use the temporary marshalling buffer to
     * write to the log.
     */
    private long nTempBufferWrites;

    /* For unit tests */
    private TestHook readHook; // used for generating exceptions on log reads

    /**
     * There is a single log manager per database environment.
     */
    public LogManager(EnvironmentImpl envImpl,
                      boolean readOnly)
        throws DatabaseException {

        // Set up log buffers
        this.envImpl = envImpl;
        this.fileManager = envImpl.getFileManager();
        DbConfigManager configManager = envImpl.getConfigManager();
        this.readOnly = readOnly;
        logBufferPool = new LogBufferPool(fileManager, envImpl);

        /* See if we're configured to do a checksum when reading in objects. */
        doChecksumOnRead =
            configManager.getBoolean(EnvironmentParams.LOG_CHECKSUM_READ);

        logWriteLatch = LatchSupport.makeLatch(DEBUG_NAME, envImpl);
        readBufferSize =
            configManager.getInt(EnvironmentParams.LOG_FAULT_READ_SIZE);
    }

    public boolean getChecksumOnRead() {
        return doChecksumOnRead;
    }

    public long getLastLsnAtRecovery() {
        return lastLsnAtRecovery;
    }

    public void setLastLsnAtRecovery(long lastLsnAtRecovery) {
        this.lastLsnAtRecovery = lastLsnAtRecovery;
    }

    /**
     * Reset the pool when the cache is resized. This method is called after
     * the memory budget has been calculated.
     */
    public void resetPool(DbConfigManager configManager)
        throws DatabaseException {

        logBufferPool.reset(configManager);
    }

    /*
     * Writing to the log
     */

    /**
     * Log this single object and force a write of the log files.
     * @param item object to be logged
     * @param fsyncRequired if true, log files should also be fsynced.
     * @return LSN of the new log entry
     */
    public long logForceFlush(LoggableObject item,
                              boolean fsyncRequired)
        throws DatabaseException {

        return log(item,
                   false,          // is provisional
                   true,           // flush required
                   fsyncRequired,
                   false,          // forceNewLogFile
                   false,          // backgroundIO
                   DbLsn.NULL_LSN, // old lsn
                   0);             // old size
    }

    /**
     * Log this single object and force a flip of the log files.
     * @param item object to be logged
     * @return LSN of the new log entry
     */
    public long logForceFlip(LoggableObject item)
        throws DatabaseException {

        return log(item,
                   false,          // is provisional
                   true,           // flush required
                   false,          // fsync required
                   true,           // forceNewLogFile
                   false,          // backgroundIO
                   DbLsn.NULL_LSN, // old lsn
                   0);             // old size
    }

    /**
     * Write a log entry.
     * @return LSN of the new log entry
     */
    public long log(LoggableObject item)
        throws DatabaseException {

        return log(item,
                   false,          // is provisional
                   false,          // flush required
                   false,          // fsync required
                   false,          // forceNewLogFile
                   false,          // backgroundIO
                   DbLsn.NULL_LSN, // old lsn
                   0);             // old size
    }

    /**
     * Write a log entry.
     * @return LSN of the new log entry
     */
    public long log(LoggableObject item,
                    boolean isProvisional,
                    boolean backgroundIO,
                    long oldNodeLsn,
                    int oldNodeSize)
        throws DatabaseException {

        return log(item,
                   isProvisional,
                   false,          // flush required
                   false,          // fsync required
                   false,          // forceNewLogFile
                   backgroundIO,
                   oldNodeLsn,
                   oldNodeSize);
    }
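
    /*
     * Added note: the public entry points above differ only in the flags
     * they pass to the private log() below. log() just buffers the entry,
     * logForceFlush() also writes (and optionally fsyncs) the log,
     * logForceFlip() forces a flip to a new log file before writing, and the
     * five-argument log() lets the caller supply the provisional and
     * backgroundIO flags plus obsolete-node information.
     */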

    /**
     * Write a log entry.
     * @param item is the item to be logged.
     * @param isProvisional true if this entry should not be read during
     * recovery.
     * @param flushRequired if true, write the log to the file after
     * adding the item, i.e. call java.nio.channels.FileChannel.write().
     * @param fsyncRequired if true, fsync the last file after adding the item.
     * @param forceNewLogFile if true, flip to a new log file before logging
     * the item.
     * @param backgroundIO if true, sleep when the backgroundIOLimit is
     * exceeded.
     * @param oldNodeLsn is the previous version of the node to be counted as
     * obsolete, or NULL_LSN if the item is not a node or has no old LSN.
     * @param oldNodeSize is the log size of the previous version of the node
     * when oldNodeLsn is not NULL_LSN and the old node is an LN. For old INs,
     * zero must be specified.
     * @return LSN of the new log entry
     */

    private long log(LoggableObject item,
                     boolean isProvisional,
                     boolean flushRequired,
                     boolean fsyncRequired,
                     boolean forceNewLogFile,
                     boolean backgroundIO,
                     long oldNodeLsn,
                     int oldNodeSize)
        throws DatabaseException {

        if (readOnly) {
            return DbLsn.NULL_LSN;
        }

        boolean marshallOutsideLatch = item.marshallOutsideWriteLatch();
        ByteBuffer marshalledBuffer = null;
        UtilizationTracker tracker = envImpl.getUtilizationTracker();
        LogResult logResult = null;

        try {

            /*
             * If possible, marshall this item outside the log write
             * latch to allow greater concurrency by shortening the
             * write critical section.
             */
            if (marshallOutsideLatch) {
                int itemSize = item.getLogSize();
                int entrySize = itemSize + HEADER_BYTES;
                marshalledBuffer = marshallIntoBuffer(item,
                                                      itemSize,
                                                      isProvisional,
                                                      entrySize);
            }

            logResult = logItem(item, isProvisional, flushRequired,
                                forceNewLogFile, oldNodeLsn, oldNodeSize,
                                marshallOutsideLatch, marshalledBuffer,
                                tracker);

        } catch (BufferOverflowException e) {

            /*
             * A BufferOverflowException may be seen when a thread is
             * interrupted in the middle of a log write and the NIO direct
             * buffer is mangled in some way by the NIO libraries. JE
             * applications should refrain from using thread interrupt as a
             * thread communications mechanism because NIO behavior in the
             * face of interrupts is uncertain. See SR [#10463].
             *
             * One way or another, this type of IO exception leaves us in an
             * unworkable state, so throw a run recovery exception.
             */
            throw new RunRecoveryException(envImpl, e);
        } catch (IOException e) {

            /*
             * Other IOExceptions, such as out of disk conditions, should
             * notify the application but leave the environment in workable
             * condition.
             */
            throw new DatabaseException(Tracer.getStackTrace(e), e);
        }

        /*
         * Finish up business outside of the log write latch critical section.
         */

        /*
         * If this logged object needs to be fsynced, do so now using the
         * group commit mechanism.
         */
        if (fsyncRequired) {
            fileManager.groupSync();
        }

        /*
         * Periodically, as a function of how much data is written, ask the
         * checkpointer or the cleaner to wake up.
         */
        envImpl.getCheckpointer().wakeupAfterWrite();
        if (logResult.wakeupCleaner) {
            tracker.activateCleaner();
        }

        /* Update background writes. */
        if (backgroundIO) {
            envImpl.updateBackgroundWrites
                (logResult.entrySize, logBufferPool.getLogBufferSize());
        }

        return logResult.currentLsn;
    }

    abstract protected LogResult logItem(LoggableObject item,
                                         boolean isProvisional,
                                         boolean flushRequired,
                                         boolean forceNewLogFile,
                                         long oldNodeLsn,
                                         int oldNodeSize,
                                         boolean marshallOutsideLatch,
                                         ByteBuffer marshalledBuffer,
                                         UtilizationTracker tracker)
        throws IOException, DatabaseException;

    /**
     * Called within the log write critical section.
     */
    protected LogResult logInternal(LoggableObject item,
                                    boolean isProvisional,
                                    boolean flushRequired,
                                    boolean forceNewLogFile,
                                    long oldNodeLsn,
                                    int oldNodeSize,
                                    boolean marshallOutsideLatch,
                                    ByteBuffer marshalledBuffer,
                                    UtilizationTracker tracker)
        throws IOException, DatabaseException {

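        /*
         * Overview comment (added for orientation; it summarizes the code
         * that follows): count the old version obsolete, compute the entry
         * size, bump the LSN under the log write latch, marshall the item if
         * that was not already done outside the latch, fill in the prev
         * offset and checksum, copy the entry into a log buffer (or write it
         * directly from a temporary buffer if it does not fit), optionally
         * hand it to the replicator, then mark the write completed and let
         * the item do its post-log work.
         */
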
        /*
         * Do obsolete tracking before marshalling a FileSummaryLN into the
         * log buffer so that a FileSummaryLN counts itself.
         * countObsoleteNode must be called before computing the entry size,
         * since it can change the size of a FileSummaryLN entry that we're
         * logging.
         */
        LogEntryType entryType = item.getLogType();
        if (oldNodeLsn != DbLsn.NULL_LSN) {
            tracker.countObsoleteNode(oldNodeLsn, entryType, oldNodeSize);
        }

        /*
         * If an item must be protected within the log write latch for
         * marshalling, take care to also calculate its size in the protected
         * section. Note that we have to get the size *before* marshalling so
         * that the currentLsn and size are correct for utilization tracking.
         */
        int entrySize;
        if (marshallOutsideLatch) {
            entrySize = marshalledBuffer.limit();
        } else {
            entrySize = item.getLogSize() + HEADER_BYTES;
        }

        /*
         * Get the next free slot in the log, under the log write latch. Bump
         * the LSN values, which gives us a valid previous pointer, which is
         * part of the log entry header. That's why doing the checksum must
         * be in the log write latch -- we need to bump the LSN first, and
         * bumping the LSN must be done within the log write latch.
         */
        if (forceNewLogFile) {
            fileManager.forceNewLogFile();
        }

        boolean flippedFile = fileManager.bumpLsn(entrySize);
        long currentLsn = DbLsn.NULL_LSN;
        boolean wakeupCleaner = false;
        boolean usedTemporaryBuffer = false;
        boolean success = false;
        try {
            currentLsn = fileManager.getLastUsedLsn();

            /*
             * countNewLogEntry and countObsoleteNodeInexact cannot change a
             * FileSummaryLN size, so they are safe to call after getLogSize().
             */
            wakeupCleaner =
                tracker.countNewLogEntry(currentLsn, entryType, entrySize);

            /*
             * LN deletions are obsolete immediately. Inexact counting is used
             * to save resources because the cleaner knows that all deleted
             * LNs are obsolete.
             */
            if (item.countAsObsoleteWhenLogged()) {
                tracker.countObsoleteNodeInexact
                    (currentLsn, entryType, entrySize);
            }

            /*
             * This item must be marshalled within the log write latch.
             */
            if (!marshallOutsideLatch) {
                marshalledBuffer = marshallIntoBuffer(item,
                                                      entrySize - HEADER_BYTES,
                                                      isProvisional,
                                                      entrySize);
            }

            /* Sanity check */
            if (entrySize != marshalledBuffer.limit()) {
                throw new DatabaseException(
                    "Logged item entrySize= " + entrySize +
                    " but marshalledSize=" + marshalledBuffer.limit() +
                    " type=" + entryType + " currentLsn=" +
                    DbLsn.getNoFormatString(currentLsn));
            }

            /*
             * Ask for a log buffer suitable for holding this new entry. If
             * the current log buffer is full, or if we flipped into a new
             * file, write it to disk and get a new, empty log buffer to
             * use. The returned buffer will be latched for write.
             */
            LogBuffer useLogBuffer =
                logBufferPool.getWriteBuffer(entrySize, flippedFile);

            /* Add the previous offset and checksum to the entry. */
            marshalledBuffer =
                addPrevOffsetAndChecksum(marshalledBuffer,
                                         fileManager.getPrevEntryOffset(),
                                         entrySize);

            /*
             * If the LogBufferPool buffer (useBuffer) doesn't have sufficient
             * space (since they're fixed size), just use the temporary buffer
             * and throw it away when we're done. That way we don't grow the
             * LogBuffers in the pool permanently. We risk an OOME on this
             * temporary usage, but we'll risk it. [#12674]
             */
            useLogBuffer.latchForWrite();
            try {
                ByteBuffer useBuffer = useLogBuffer.getDataBuffer();
                if (useBuffer.capacity() - useBuffer.position() < entrySize) {
                    fileManager.writeLogBuffer
                        (new LogBuffer(marshalledBuffer, currentLsn));
                    usedTemporaryBuffer = true;
                    assert useBuffer.position() == 0;
                    nTempBufferWrites++;
                } else {
                    /* Copy marshalled object into write buffer. */
                    useBuffer.put(marshalledBuffer);
                }
            } finally {
                useLogBuffer.release();
            }

            /*
             * If this is a replicated log entry and this site is part of a
             * replication group, send this operation to other sites.
             * The replication logic takes care of deciding whether this site
             * is a master.
             */
            if (envImpl.isReplicated()) {
                if (item.getLogType().isReplicated()) {
                    envImpl.getReplicator().replicateOperation(
                                        Operation.PLACEHOLDER,
                                        marshalledBuffer);
                }
            }
            success = true;
        } finally {
            if (!success) {

                /*
                 * The LSN pointer, log buffer position, and corresponding
                 * file position march in lockstep.
                 *
                 * 1. We bump the LSN.
                 * 2. We copy the loggable item into the log buffer.
                 * 3. We may try to write the log buffer.
                 *
                 * If we've failed to put the item into the log buffer (2), we
                 * need to restore the old LSN state so that the log buffer
                 * doesn't have a hole. [SR #12638] If we fail after (2), we
                 * don't need to restore state, because log buffers will still
                 * match file positions.
                 */
                fileManager.restoreLastPosition();
            }
        }

        /*
         * Tell the log buffer pool that we finished the write. Record the
         * LSN against this logbuffer, and write the buffer to disk if
         * needed.
         */
        if (!usedTemporaryBuffer) {
            logBufferPool.writeCompleted(currentLsn, flushRequired);
        }

        /*
         * If the txn is not null, the first item is an LN. Update the txn
         * with info about the latest LSN. Note that this has to happen
         * within the log write latch.
         */
        item.postLogWork(currentLsn);

        return new LogResult(currentLsn, wakeupCleaner, entrySize);
    }

    /**
     * Serialize a loggable object into this buffer.
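     *
     * <p>Added note: the returned buffer is flipped so that its limit equals
     * the entry size; the first CHECKSUM_BYTES are left as zeros and the
     * prev offset is still a placeholder, both to be filled in later by
     * addPrevOffsetAndChecksum().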
     */
    private ByteBuffer marshallIntoBuffer(LoggableObject item,
                                          int itemSize,
                                          boolean isProvisional,
                                          int entrySize)
        throws DatabaseException {

        ByteBuffer destBuffer = ByteBuffer.allocate(entrySize);

        /* Reserve 4 bytes at the head for the checksum. */
        destBuffer.position(CHECKSUM_BYTES);

        /* Write the header. */
        writeHeader(destBuffer, item.getLogType(), itemSize, isProvisional);

        /* Put the entry in. */
        item.writeToLog(destBuffer);

        /* Set the limit so it can be used as the size of the entry. */
        destBuffer.flip();

        return destBuffer;
    }

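    /**
     * Added description: writes the previous-entry offset into the header at
     * HEADER_PREV_OFFSET, then computes an Adler32 checksum over everything
     * after the checksum field (header content plus item) and stores it in
     * the first CHECKSUM_BYTES of the buffer. The buffer is rewound to
     * position 0, ready to be copied into a log buffer.
     */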
    private ByteBuffer addPrevOffsetAndChecksum(ByteBuffer destBuffer,
                                                long lastOffset,
                                                int entrySize) {

        Checksum checksum = Adler32.makeChecksum();

        /* Add the prev pointer */
        destBuffer.position(HEADER_PREV_OFFSET);
        LogUtils.writeUnsignedInt(destBuffer, lastOffset);

        /* Now calculate the checksum and write it into the buffer. */
        checksum.update(destBuffer.array(), CHECKSUM_BYTES,
                        (entrySize - CHECKSUM_BYTES));
        destBuffer.position(0);
        LogUtils.writeUnsignedInt(destBuffer, checksum.getValue());

        /* Leave this buffer ready for copying into another buffer. */
        destBuffer.position(0);

        return destBuffer;
    }

    /**
     * Serialize a loggable object into this buffer. Return it ready for a
     * copy.
     */
    ByteBuffer putIntoBuffer(LoggableObject item,
                             int itemSize,
                             long prevLogEntryOffset,
                             boolean isProvisional,
                             int entrySize)
        throws DatabaseException {

        ByteBuffer destBuffer =
            marshallIntoBuffer(item, itemSize, isProvisional, entrySize);
        return addPrevOffsetAndChecksum(destBuffer, 0, entrySize);
    }

    /**
     * Helper to write the common entry header.
     * @param destBuffer destination
     * @param itemType type of the item being logged
     * @param itemSize we could ask the item for this, but are passing it
     * as a parameter for efficiency, because it's already available
     */
    private void writeHeader(ByteBuffer destBuffer,
                             LogEntryType itemType,
                             int itemSize,
                             boolean isProvisional) {

        // log entry type
        byte typeNum = itemType.getTypeNum();
        destBuffer.put(typeNum);

        // version
        byte version = itemType.getVersion();
        if (isProvisional)
            version = LogEntryType.setProvisional(version);
        destBuffer.put(version);

        // entry size
        destBuffer.position(HEADER_SIZE_OFFSET);
        LogUtils.writeInt(destBuffer, itemSize);
    }

    /*
     * Reading from the log.
     */

    /**
     * Instantiate all the objects in the log entry at this LSN.
     * @param lsn location of entry in log.
     * @return log entry that embodies all the objects in the log entry.
     */
    public LogEntry getLogEntry(long lsn)
        throws DatabaseException {

        /*
         * Fail loudly if the environment is invalid. A RunRecoveryException
         * must have occurred.
         */
        envImpl.checkIfInvalid();

        /*
         * Get a log source for the log entry which provides an abstraction
         * that hides whether the entry is in a buffer or on disk. Will
         * register as a reader for the buffer or the file, which will take a
         * latch if necessary.
         */
        LogSource logSource = getLogSource(lsn);

        /* Read the log entry from the log source. */
        return getLogEntryFromLogSource(lsn, logSource);
    }

    LogEntry getLogEntry(long lsn, RandomAccessFile file)
        throws DatabaseException {

        return getLogEntryFromLogSource
            (lsn, new FileSource(file, readBufferSize, fileManager));
    }

    /**
     * Instantiate all the objects in the log entry at this LSN. This will
     * release the log source at the first opportunity.
     *
     * @param lsn location of entry in log
     * @return log entry that embodies all the objects in the log entry
     */
    private LogEntry getLogEntryFromLogSource(long lsn,
                                              LogSource logSource)
        throws DatabaseException {

        try {

            /*
             * Read the log entry header into a byte buffer. Be sure to read
             * it in the order that it was written, and with the same
             * marshalling! Ideally, entry header read/write would be
             * encapsulated in a single class, but we don't want to have to
             * instantiate a new object in the critical path here.
             * XXX - false economy, change.
             */
            long fileOffset = DbLsn.getFileOffset(lsn);
            ByteBuffer entryBuffer = logSource.getBytes(fileOffset);

            /* Read the checksum to move the buffer forward. */
            ChecksumValidator validator = null;
            long storedChecksum = LogUtils.getUnsignedInt(entryBuffer);
            if (doChecksumOnRead) {
                validator = new ChecksumValidator();
                validator.update(envImpl, entryBuffer,
                                 HEADER_CONTENT_BYTES, false);
            }

            /* Read the header. */
            byte loggableType = entryBuffer.get(); // log entry type
            byte version = entryBuffer.get();      // version

            /* Read the size, skipping over the prev offset. */
            entryBuffer.position(entryBuffer.position() + PREV_BYTES);
            int itemSize = LogUtils.readInt(entryBuffer);

            /*
             * Now that we know the size, read the rest of the entry
             * if the first read didn't get enough.
             */
            if (entryBuffer.remaining() < itemSize) {
                entryBuffer = logSource.getBytes(fileOffset + HEADER_BYTES,
                                                 itemSize);
                nRepeatFaultReads++;
            }

            /*
             * Do entry validation. Run the checksum before checking the
             * entry type, since it is the more encompassing error.
             */
            if (doChecksumOnRead) {
                /* Check the checksum first. */
                validator.update(envImpl, entryBuffer, itemSize, false);
                validator.validate(envImpl, storedChecksum, lsn);
            }

            assert LogEntryType.isValidType(loggableType):
                "Read non-valid log entry type: " + loggableType;

            /* Read the entry. */
            LogEntry logEntry =
                LogEntryType.findType(loggableType, version).getNewLogEntry();
            logEntry.readEntry(entryBuffer, itemSize, version, true);

            /* For testing only; generate a read io exception. */
            if (readHook != null) {
                readHook.doIOHook();
            }

            /*
             * Done with the log source, release in the finally clause. Note
             * that the buffer we get back from logSource is just a duplicated
             * buffer, where the position and state are copied but not the
             * actual data. So we must not release the logSource until we are
             * done marshalling the data from the buffer into the object
             * itself.
             */
            return logEntry;
        } catch (DatabaseException e) {

            /*
             * Propagate DatabaseExceptions; we want to preserve any subtypes
             * for downstream handling.
             */
            throw e;
        } catch (ClosedChannelException e) {

            /*
             * The channel should never be closed. It may be closed because
             * of an interrupt received by another thread. See SR [#10463].
             */
            throw new RunRecoveryException(envImpl,
                                           "Channel closed, may be " +
                                           "due to thread interrupt",
                                           e);
        } catch (Exception e) {
            throw new DatabaseException(e);
        } finally {
            if (logSource != null) {
                logSource.release();
            }
        }
    }

    /**
     * Fault in the first object in the log entry at this LSN.
     * @param lsn location of object in log
     * @return the object in the log
     */
    public Object get(long lsn)
        throws DatabaseException {

        LogEntry entry = getLogEntry(lsn);
        return entry.getMainItem();
    }

    /**
     * Find the LSN, whether in a file or still in the log buffers.
     * Is public for unit testing.
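     *
     * <p>Added note: callers must release the returned LogSource when they
     * are done with it, as getLogEntryFromLogSource() does. A minimal
     * sketch:
     * <pre>
     *     LogSource source = logManager.getLogSource(lsn);
     *     try {
     *         ByteBuffer buf = source.getBytes(DbLsn.getFileOffset(lsn));
     *         // ... read the entry from buf ...
     *     } finally {
     *         source.release();
     *     }
     * </pre>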
     */
    public LogSource getLogSource(long lsn)
        throws DatabaseException {

        /*
         * First look in the log buffers to see if this LSN is still in
         * memory.
         */
        LogBuffer logBuffer = logBufferPool.getReadBuffer(lsn);

        if (logBuffer == null) {
            try {
                /* Not in the in-memory log -- read it off disk. */
                return new FileHandleSource
                    (fileManager.getFileHandle(DbLsn.getFileNumber(lsn)),
                     readBufferSize,
                     fileManager);
            } catch (LogFileNotFoundException e) {
                /* Add the LSN to the exception message. */
                throw new LogFileNotFoundException
                    (DbLsn.getNoFormatString(lsn) + ' ' + e.getMessage());
            }
        } else {
            return logBuffer;
        }
    }

    /**
     * Flush all log entries, fsync the log file.
     */
    public void flush()
        throws DatabaseException {

        if (!readOnly) {
            flushInternal();
            fileManager.syncLogEnd();
        }
    }

    /**
     * May be used to avoid a sync, to speed unit tests.
     */
    public void flushNoSync()
        throws DatabaseException {

        if (!readOnly) {
            flushInternal();
        }
    }

    abstract protected void flushInternal()
        throws LogException, DatabaseException;

    public void loadStats(StatsConfig config, EnvironmentStats stats)
        throws DatabaseException {

        stats.setNRepeatFaultReads(nRepeatFaultReads);
        stats.setNTempBufferWrites(nTempBufferWrites);
        if (config.getClear()) {
            nRepeatFaultReads = 0;
            nTempBufferWrites = 0;
        }

        logBufferPool.loadStats(config, stats);
        fileManager.loadStats(config, stats);
    }

    /**
     * Returns a tracked summary for the given file which will not be
     * flushed. Used for watching changes that occur while a file is being
     * cleaned.
     */
    abstract public TrackedFileSummary getUnflushableTrackedSummary(long file)
        throws DatabaseException;

    protected TrackedFileSummary getUnflushableTrackedSummaryInternal(long file)
        throws DatabaseException {

        return envImpl.getUtilizationTracker().
                       getUnflushableTrackedSummary(file);
    }

    /**
     * Count node as obsolete under the log write latch. This is done here
     * because the log write latch is managed here, and all utilization
     * counting must be performed under the log write latch.
     */
    abstract public void countObsoleteNode(long lsn,
                                           LogEntryType type,
                                           int size)
        throws DatabaseException;

    protected void countObsoleteNodeInternal(UtilizationTracker tracker,
                                             long lsn,
                                             LogEntryType type,
                                             int size)
        throws DatabaseException {

        tracker.countObsoleteNode(lsn, type, size);
    }

    /**
     * Counts file summary info under the log write latch.
     */
    abstract public void countObsoleteNodes(TrackedFileSummary[] summaries)
        throws DatabaseException;

    protected void countObsoleteNodesInternal(UtilizationTracker tracker,
                                              TrackedFileSummary[] summaries)
        throws DatabaseException {

        for (int i = 0; i < summaries.length; i += 1) {
            TrackedFileSummary summary = summaries[i];
            tracker.addSummary(summary.getFileNumber(), summary);
        }
    }

    /**
     * Counts the given obsolete IN LSNs under the log write latch.
     */
    abstract public void countObsoleteINs(List lsnList)
        throws DatabaseException;

    protected void countObsoleteINsInternal(List lsnList)
        throws DatabaseException {

        UtilizationTracker tracker = envImpl.getUtilizationTracker();

        for (int i = 0; i < lsnList.size(); i += 1) {
            Long offset = (Long) lsnList.get(i);
            tracker.countObsoleteNode
                (offset.longValue(), LogEntryType.LOG_IN, 0);
        }
    }

    /* For unit testing only. */
    public void setReadHook(TestHook hook) {
        readHook = hook;
    }

    /**
     * LogResult holds the multivalue return from logInternal.
     */
    static class LogResult {
        long currentLsn;
        boolean wakeupCleaner;
        int entrySize;

        LogResult(long currentLsn,
                  boolean wakeupCleaner,
                  int entrySize) {
            this.currentLsn = currentLsn;
            this.wakeupCleaner = wakeupCleaner;
            this.entrySize = entrySize;
        }
    }
}