1 /*-
2  * See the file LICENSE for redistribution information.
3  *
4  * Copyright (c) 2002,2006 Oracle. All rights reserved.
5  *
6  * $Id: IN.java,v 1.294 2006/11/03 03:07:56 mark Exp $
7  */

8
9 package com.sleepycat.je.tree;
10
11 import java.nio.ByteBuffer;
12 import java.util.ArrayList;
13 import java.util.Comparator;
14 import java.util.List;
15 import java.util.logging.Level;
16 import java.util.logging.Logger;
17
18 import com.sleepycat.je.DatabaseException;
19 import com.sleepycat.je.cleaner.UtilizationTracker;
20 import com.sleepycat.je.config.EnvironmentParams;
21 import com.sleepycat.je.dbi.DatabaseId;
22 import com.sleepycat.je.dbi.DatabaseImpl;
23 import com.sleepycat.je.dbi.DbConfigManager;
24 import com.sleepycat.je.dbi.DbTree;
25 import com.sleepycat.je.dbi.EnvironmentImpl;
26 import com.sleepycat.je.dbi.INList;
27 import com.sleepycat.je.dbi.MemoryBudget;
28 import com.sleepycat.je.latch.LatchNotHeldException;
29 import com.sleepycat.je.latch.LatchSupport;
30 import com.sleepycat.je.latch.SharedLatch;
31 import com.sleepycat.je.log.LogEntryType;
32 import com.sleepycat.je.log.LogException;
33 import com.sleepycat.je.log.LogFileNotFoundException;
34 import com.sleepycat.je.log.LogManager;
35 import com.sleepycat.je.log.LogReadable;
36 import com.sleepycat.je.log.LogUtils;
37 import com.sleepycat.je.log.LoggableObject;
38 import com.sleepycat.je.log.entry.INLogEntry;
39 import com.sleepycat.je.utilint.DbLsn;
40 import com.sleepycat.je.utilint.Tracer;
41
42 /**
43  * An IN represents an Internal Node in the JE tree.
44  */

45 public class IN extends Node
46     implements Comparable, LoggableObject, LogReadable {
47
48     private static final String BEGIN_TAG = "<in>";
49     private static final String END_TAG = "</in>";
50     private static final String TRACE_SPLIT = "Split:";
51     private static final String TRACE_DELETE = "Delete:";
52
53     private static final byte KNOWN_DELETED_BIT = 0x1;
54     private static final byte CLEAR_KNOWN_DELETED_BIT = ~0x1;
55     private static final byte DIRTY_BIT = 0x2;
56     private static final byte CLEAR_DIRTY_BIT = ~0x2;
57     private static final byte MIGRATE_BIT = 0x4;
58     private static final byte CLEAR_MIGRATE_BIT = ~0x4;
59     private static final byte PENDING_DELETED_BIT = 0x8;
60     private static final byte CLEAR_PENDING_DELETED_BIT = ~0x8;
61
62     private static final int BYTES_PER_LSN_ENTRY = 4;
63     private static final int MAX_FILE_OFFSET = 0xfffffe;
64     private static final int THREE_BYTE_NEGATIVE_ONE = 0xffffff;
65     private static final int GROWTH_INCREMENT = 5; // for future
66

67     /*
68      * Levels:
69      * The mapping tree has levels in the 0x20000 -> 0x2ffff number space.
70      * The main tree has levels in the 0x10000 -> 0x1ffff number space.
71      * The duplicate tree levels are in the 0 -> 0xffff number space.
72      */

73     public static final int DBMAP_LEVEL = 0x20000;
74     public static final int MAIN_LEVEL = 0x10000;
75     public static final int LEVEL_MASK = 0x0ffff;
76     public static final int MIN_LEVEL = -1;
77     public static final int MAX_LEVEL = Integer.MAX_VALUE;
78     public static final int BIN_LEVEL = MAIN_LEVEL | 1;
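    /*
     * Added illustration (not from the original comments): a main-tree BIN
     * has level MAIN_LEVEL | 1 = 0x10001 (BIN_LEVEL) and its parent IN has
     * level 0x10002; (level & LEVEL_MASK) recovers the height within the
     * tree, while the high bits identify which tree the node belongs to.
     */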
79
80     /*
81      * IN eviction types returned by getEvictionType.
82      */

83     public static final int MAY_NOT_EVICT = 0;
84     public static final int MAY_EVICT_LNS = 1;
85     public static final int MAY_EVICT_NODE = 2;
86
87     protected SharedLatch latch;
88     private long generation;
89     private boolean dirty;
90     private int nEntries;
91     private byte[] identifierKey;
92
93     /*
94      * The following four arrays could more easily be embodied in an array of
95      * ChildReferences. However, for in-memory space savings, we save the
96      * overhead of ChildReference and DbLsn objects by in-lining the elements
97      * of the ChildReference directly in the IN.
98      */

99     private Node[] entryTargets;
100     private byte[][] entryKeyVals; // byte[][] instead of Key[] to save space
101

102     /*
103      * The following entryLsnXXX fields are used for storing LSNs. There are
104      * two possible representations: a byte array based rep, and a long array
105      * based one. For compactness, the byte array rep is used initially. A
106      * single byte[] that uses four bytes per LSN is used. The baseFileNumber
107      * field contains the lowest file number of any LSN in the array. Then for
108      * each entry (four bytes each), the first byte contains the offset from
109      * the baseFileNumber of that LSN's file number. The remaining three bytes
110      * contain the file offset portion of the LSN. Three bytes will hold a
111      * maximum offset of 16,777,214 (0xfffffe), so with the default JE log file
112      * size of 10,000,000 bytes this works well.
113      *
114      * If either (1) the difference in file numbers exceeds 127
115      * (Byte.MAX_VALUE) or (2) the file offset is greater than 16,777,214, then
116      * the byte[] based rep mutates to a long[] based rep.
117      *
118      * In the byte[] rep, DbLsn.NULL_LSN is represented by setting the file
119      * offset bytes for a given entry to -1 (0xffffff).
120      */
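    /*
     * Worked example (illustrative, not from the original comments): with
     * baseFileNumber = 12, an LSN for file 14 at file offset 0x001234 is
     * stored in its four-byte slot as fileNumberOffset = (byte) (14 - 12) = 2
     * followed by the offset bytes 0x34, 0x12, 0x00 (low byte first, see
     * put3ByteInt below). A slot whose three offset bytes are all 0xff
     * decodes to DbLsn.NULL_LSN. If a later entry referenced a file more
     * than 127 files above the base, or a file offset above 0xfffffe, the
     * whole array would mutate to the long[] representation.
     */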

121     private long baseFileNumber;
122     private byte[] entryLsnByteArray;
123     private long[] entryLsnLongArray;
124     private byte[] entryStates;
125     private DatabaseImpl databaseImpl;
126     private boolean isRoot; // true if this is the root of a tree
127     private int level;
128     private long inMemorySize;
129     private boolean inListResident; // true if this IN is on the IN list
130     // Location of last full version.
131     private long lastFullVersion = DbLsn.NULL_LSN;
132
133     /*
134      * A list of Long LSNs that cannot be counted as obsolete until an ancestor
135      * IN is logged non-provisionally.
136      */

137     private List provisionalObsolete;
138
139     /* Used to indicate that an exact match was found in findEntry. */
140     public static final int EXACT_MATCH = (1 << 16);
141
142     /* Used to indicate that an insert was successful. */
143     public static final int INSERT_SUCCESS = (1 << 17);
144
145     /*
146      * Accumulated memory budget delta. Once this exceeds
147      * MemoryBudget.ACCUMULATED_LIMIT we inform the MemoryBudget that a change
148      * has occurred. See SR 12273.
149      */

150     private int accumulatedDelta = 0;
151
152     /*
153      * Max allowable accumulation of memory budget changes before MemoryBudget
154      * should be updated. This allows for consolidating multiple calls to
155      * updateXXXMemoryBudget() into one call. Not declared final so that the
156      * unit tests can modify it. See SR 12273.
157      */

158     public static int ACCUMULATED_LIMIT = 1000;
159
160     /**
161      * Create an empty IN, with no node id, to be filled in from the log.
162      */

163     public IN() {
164         super(false);
165         init(null, Key.EMPTY_KEY, 0, 0);
166     }
167
168     /**
169      * Create a new IN.
170      */

171     public IN(DatabaseImpl db, byte[] identifierKey, int capacity, int level) {
172
173         super(true);
174         init(db, identifierKey, capacity, generateLevel(db.getId(), level));
175         initMemorySize();
176     }
177
178     /**
179      * Initialize IN object.
180      */

181     protected void init(DatabaseImpl db,
182                         byte[] identifierKey,
183                         int initialCapacity,
184                         int level) {
185         setDatabase(db);
186     EnvironmentImpl env =
187         (databaseImpl == null) ? null : databaseImpl.getDbEnvironment();
188         latch =
189         LatchSupport.makeSharedLatch(shortClassName() + getNodeId(), env);
190     latch.setExclusiveOnly(EnvironmentImpl.getSharedLatches() ?
191                    isAlwaysLatchedExclusively() :
192                    true);
193     assert latch.setNoteLatch(true);
194         generation = 0;
195         dirty = false;
196         nEntries = 0;
197         this.identifierKey = identifierKey;
198     entryTargets = new Node[initialCapacity];
199     entryKeyVals = new byte[initialCapacity][];
200     baseFileNumber = -1;
201     entryLsnByteArray = new byte[initialCapacity << 2];
202     entryLsnLongArray = null;
203     entryStates = new byte[initialCapacity];
204         isRoot = false;
205         this.level = level;
206         inListResident = false;
207     }
208
209     /**
210      * Initialize the per-node memory count by computing its memory usage.
211      */

212     protected void initMemorySize() {
213         inMemorySize = computeMemorySize();
214     }
215
216     /*
217      * To get an inexpensive but random distribution of INs in the INList,
218      * equality and comparison for INs are based on a combination of the node
219      * id and identity hash code. Note that this will still give a correct
220      * equality value for any comparisons outside the INList sorted set.
221      */

222     private long getEqualityKey() {
223         int hash = System.identityHashCode(this);
224         long hash2 = (((long) hash) << 32) | hash;
225         return hash2 ^ getNodeId();
226     }
227     
228     public boolean equals(Object obj) {
229         if (!(obj instanceof IN)) {
230             return false;
231         }
232
233         IN in = (IN) obj;
234         return (this.getEqualityKey() == in.getEqualityKey());
235     }
236
237     public int hashCode() {
238         return (int) getEqualityKey();
239     }
240
241     /**
242      * Sort based on node id.
243      */

244     public int compareTo(Object o) {
245         if (o == null) {
246             throw new NullPointerException();
247         }
248
249         IN argIN = (IN) o;
250
251         long argEqualityKey = argIN.getEqualityKey();
252         long myEqualityKey = getEqualityKey();
253
254         if (myEqualityKey < argEqualityKey) {
255             return -1;
256         } else if (myEqualityKey > argEqualityKey) {
257             return 1;
258         } else {
259             return 0;
260         }
261     }
262
263     /**
264      * Create a new IN. Need this because we can't call newInstance() without
265      * getting a 0 for nodeid.
266      */

267     protected IN createNewInstance(byte[] identifierKey,
268                                    int maxEntries,
269                                    int level) {
270         return new IN(databaseImpl, identifierKey, maxEntries, level);
271     }
272
273     /*
274      * Return whether the shared latch for this kind of node should be of the
275      * "always exclusive" variety. Presently, only IN's are actually latched
276      * shared. BINs, DINs, and DBINs are all latched exclusive only.
277      */

278     boolean isAlwaysLatchedExclusively() {
279     return false;
280     }
281
282     /**
283      * Initialize a node that has been read in from the log.
284      */

285     public void postFetchInit(DatabaseImpl db, long sourceLsn)
286         throws DatabaseException {
287
288         setDatabase(db);
289         setLastFullLsn(sourceLsn);
290         EnvironmentImpl env = db.getDbEnvironment();
291         initMemorySize(); // compute before adding to inlist
292         env.getInMemoryINs().add(this);
293     }
294
295     /**
296      * Initialize a node read in during recovery.
297      */

298     public void postRecoveryInit(DatabaseImpl db, long sourceLsn) {
299         setDatabase(db);
300         setLastFullLsn(sourceLsn);
301         initMemorySize();
302     }
303
304     /**
305      * Sets the last logged LSN.
306      */

307     void setLastFullLsn(long lsn) {
308         lastFullVersion = lsn;
309     }
310         
311     /**
312      * Returns the last logged LSN, or DbLsn.NULL_LSN if never logged. Is public for
313      * unit testing.
314      */

315     public long getLastFullVersion() {
316         return lastFullVersion;
317     }
318
319     /**
320      * Latch this node exclusive, optionally setting the generation.
321      */

322     public void latch(boolean updateGeneration)
323         throws DatabaseException {
324
325         if (updateGeneration) {
326             setGeneration();
327         }
328         latch.acquireExclusive();
329     }
330
331     /**
332      * Latch this node shared, optionally setting the generation.
333      */

334     public void latchShared(boolean updateGeneration)
335     throws DatabaseException {
336
337     if (updateGeneration) {
338         setGeneration();
339     }
340
341     latch.acquireShared();
342     }
343
344     /**
345      * Latch this node if it is not latched by another thread, optionally
346      * setting the generation if the latch succeeds.
347      */

348     public boolean latchNoWait(boolean updateGeneration)
349         throws DatabaseException {
350
351         if (latch.acquireExclusiveNoWait()) {
352             if (updateGeneration) {
353                 setGeneration();
354             }
355             return true;
356         } else {
357             return false;
358         }
359     }
360
361     /**
362      * Latch this node exclusive and set the generation.
363      */

364     public void latch()
365         throws DatabaseException {
366
367         latch(true);
368     }
369
370     /**
371      * Latch this node shared and set the generation.
372      */

373     public void latchShared()
374     throws DatabaseException {
375
376     latchShared(true);
377     }
378
379     /**
380      * Latch this node if it is not latched by another thread, and set the
381      * generation if the latch succeeds.
382      */

383     public boolean latchNoWait()
384         throws DatabaseException {
385
386         return latchNoWait(true);
387     }
388     
389     /**
390      * Release the latch on this node.
391      */

392     public void releaseLatch()
393         throws LatchNotHeldException {
394
395         latch.release();
396     }
397
398     /**
399      * Release the latch on this node.
400      */

401     public void releaseLatchIfOwner()
402         throws LatchNotHeldException {
403
404         latch.releaseIfOwner();
405     }
406
407     /**
408      * @return true if this thread holds the IN's latch
409      */

410     public boolean isLatchOwnerForRead() {
411     return latch.isOwner();
412     }
413
414     public boolean isLatchOwnerForWrite() {
415     return latch.isWriteLockedByCurrentThread();
416     }
417
418     public long getGeneration() {
419         return generation;
420     }
421
422     public void setGeneration() {
423         generation = Generation.getNextGeneration();
424     }
425
426     public void setGeneration(long newGeneration) {
427         generation = newGeneration;
428     }
429
430     public int getLevel() {
431         return level;
432     }
433
434     protected int generateLevel(DatabaseId dbId, int newLevel) {
435         if (dbId.equals(DbTree.ID_DB_ID)) {
436             return newLevel | DBMAP_LEVEL;
437         } else {
438             return newLevel | MAIN_LEVEL;
439         }
440     }
441
442     public boolean getDirty() {
443         return dirty;
444     }
445
446     /* public for unit tests */
447     public void setDirty(boolean dirty) {
448         this.dirty = dirty;
449     }
450
451     public boolean isRoot() {
452         return isRoot;
453     }
454
455     public boolean isDbRoot() {
456     return isRoot;
457     }
458
459     void setIsRoot(boolean isRoot) {
460         this.isRoot = isRoot;
461         setDirty(true);
462     }
463
464     /**
465      * @return the identifier key for this node.
466      */

467     public byte[] getIdentifierKey() {
468         return identifierKey;
469     }
470
471     /**
472      * Set the identifier key for this node.
473      *
474      * @param key - the new identifier key for this node.
475      */

476     void setIdentifierKey(byte[] key) {
477         identifierKey = key;
478         setDirty(true);
479     }
480
481     /**
482      * Get the key (dupe or identifier) in child that is used to locate it in
483      * 'this' node.
484      */

485     public byte[] getChildKey(IN child)
486         throws DatabaseException {
487
488         return child.getIdentifierKey();
489     }
490
491     /*
492      * An IN uses the main key in its searches.
493      */

494     public byte[] selectKey(byte[] mainTreeKey, byte[] dupTreeKey) {
495         return mainTreeKey;
496     }
497
498     /**
499      * Return the key for this duplicate set.
500      */

501     public byte[] getDupKey()
502         throws DatabaseException {
503
504         throw new DatabaseException(shortClassName() + ".getDupKey() called");
505     }
506
507     /**
508      * Return the key for navigating through the duplicate tree.
509      */

510     public byte[] getDupTreeKey() {
511         return null;
512     }
513     /**
514      * Return the key for navigating through the main tree.
515      */

516     public byte[] getMainTreeKey() {
517         return getIdentifierKey();
518     }
519
520     /**
521      * Get the database for this IN.
522      */

523     public DatabaseImpl getDatabase() {
524         return databaseImpl;
525     }
526
527     /**
528      * Set the database reference for this node.
529      */

530     public void setDatabase(DatabaseImpl db) {
531         databaseImpl = db;
532     }
533         
534     /*
535      * Get the database id for this node.
536      */

537     public DatabaseId getDatabaseId() {
538         return databaseImpl.getId();
539     }
540
541     private void setEntryInternal(int from, int to) {
542     entryTargets[to] = entryTargets[from];
543     entryKeyVals[to] = entryKeyVals[from];
544     entryStates[to] = entryStates[from];
545     /* Will implement this in the future. Note, don't adjust if mutating.*/
546     //maybeAdjustCapacity(offset);
547     if (entryLsnLongArray == null) {
548         int fromOff = from << 2;
549         int toOff = to << 2;
550         entryLsnByteArray[toOff++] = entryLsnByteArray[fromOff++];
551         entryLsnByteArray[toOff++] = entryLsnByteArray[fromOff++];
552         entryLsnByteArray[toOff++] = entryLsnByteArray[fromOff++];
553         entryLsnByteArray[toOff] = entryLsnByteArray[fromOff];
554     } else {
555         entryLsnLongArray[to] = entryLsnLongArray[from];
556     }
557     }
558
559     private void clearEntry(int idx) {
560     entryTargets[idx] = null;
561     entryKeyVals[idx] = null;
562     setLsnElement(idx, DbLsn.NULL_LSN);
563     entryStates[idx] = 0;
564     }
565
566     /**
567      * Return the idx'th key.
568      */

569     public byte[] getKey(int idx) {
570         return entryKeyVals[idx];
571     }
572
573     /**
574      * Set the idx'th key.
575      */

576     private void setKey(int idx, byte[] keyVal) {
577     entryKeyVals[idx] = keyVal;
578         entryStates[idx] |= DIRTY_BIT;
579     }
580
581     /**
582      * Get the idx'th migrate status.
583      */

584     public boolean getMigrate(int idx) {
585         return (entryStates[idx] & MIGRATE_BIT) != 0;
586     }
587
588     /**
589      * Set the idx'th migrate status.
590      */

591     public void setMigrate(int idx, boolean migrate) {
592         if (migrate) {
593             entryStates[idx] |= MIGRATE_BIT;
594         } else {
595             entryStates[idx] &= CLEAR_MIGRATE_BIT;
596         }
597     }
598
599     public byte getState(int idx) {
600     return entryStates[idx];
601     }
602
603     /**
604      * Return the idx'th target.
605      */

606     public Node getTarget(int idx) {
607         return entryTargets[idx];
608     }
609
610     /**
611      * Sets the idx'th target. No need to make dirty, that state only applies
612      * to key and LSN.
613      *
614      * <p>WARNING: This method does not update the memory budget. The caller
615      * must update the budget.</p>
616      */

617     void setTarget(int idx, Node target) {
618         entryTargets[idx] = target;
619     }
620
621     /**
622      * Return the idx'th LSN for this entry.
623      *
624      * @return the idx'th LSN for this entry.
625      */

626     public long getLsn(int idx) {
627     if (entryLsnLongArray == null) {
628         int offset = idx << 2;
629         int fileOffset = getFileOffset(offset);
630         if (fileOffset == -1) {
631         return DbLsn.NULL_LSN;
632         } else {
633         return DbLsn.makeLsn((long) (baseFileNumber +
634                          getFileNumberOffset(offset)),
635                      fileOffset);
636         }
637     } else {
638         return entryLsnLongArray[idx];
639     }
640     }
641
642     /**
643      * Sets the idx'th target LSN. Make this a private helper method, so we're
644      * sure to set the IN dirty where appropriate.
645      */

646     private void setLsn(int idx, long lsn) {
647
648     int oldSize = computeLsnOverhead();
649     /* setLsnElement can mutate to an array of longs. */
650     setLsnElement(idx, lsn);
651     changeMemorySize(computeLsnOverhead() - oldSize);
652         entryStates[idx] |= DIRTY_BIT;
653     }
654
655     /* For unit tests. */
656     long[] getEntryLsnLongArray() {
657     return entryLsnLongArray;
658     }
659
660     /* For unit tests. */
661     byte[] getEntryLsnByteArray() {
662     return entryLsnByteArray;
663     }
664
665     /* For unit tests. */
666     void initEntryLsn(int capacity) {
667     entryLsnLongArray = null;
668     entryLsnByteArray = new byte[capacity << 2];
669     baseFileNumber = -1;
670     }
671
672     /* Use default protection for unit tests. */
673     void setLsnElement(int idx, long value) {
674
675     int offset = idx << 2;
676     /* Will implement this in the future. Note, don't adjust if mutating.*/
677     //maybeAdjustCapacity(offset);
678     if (entryLsnLongArray != null) {
679         entryLsnLongArray[idx] = value;
680         return;
681     }
682
683     if (value == DbLsn.NULL_LSN) {
684         setFileNumberOffset(offset, (byte) 0);
685         setFileOffset(offset, -1);
686         return;
687     }
688
689     long thisFileNumber = DbLsn.getFileNumber(value);
690
691     if (baseFileNumber == -1) {
692         /* First entry. */
693         baseFileNumber = thisFileNumber;
694         setFileNumberOffset(offset, (byte) 0);
695     } else {
696         if (thisFileNumber < baseFileNumber) {
697         if (!adjustFileNumbers(thisFileNumber)) {
698             mutateToLongArray(idx, value);
699             return;
700         }
701         baseFileNumber = thisFileNumber;
702         }
703         long fileNumberDifference = thisFileNumber - baseFileNumber;
704         if (fileNumberDifference > Byte.MAX_VALUE) {
705         mutateToLongArray(idx, value);
706         return;
707         }
708         setFileNumberOffset
709         (offset, (byte) (thisFileNumber - baseFileNumber));
710         //assert getFileNumberOffset(offset) >= 0;
711     }
712
713     int fileOffset = (int) DbLsn.getFileOffset(value);
714     if (fileOffset > MAX_FILE_OFFSET) {
715         mutateToLongArray(idx, value);
716         return;
717     }
718
719     setFileOffset(offset, fileOffset);
720     //assert getLsn(offset) == value;
721     }
722
723     private void mutateToLongArray(int idx, long value) {
724     int nElts = entryLsnByteArray.length >> 2;
725     long[] newArr = new long[nElts];
726     for (int i = 0; i < nElts; i++) {
727         newArr[i] = getLsn(i);
728     }
729     newArr[idx] = value;
730     entryLsnLongArray = newArr;
731     entryLsnByteArray = null;
732     }
733
734     /* Will implement this in the future. Note, don't adjust if mutating.*/
735     /***
736     private void maybeAdjustCapacity(int offset) {
737     if (entryLsnLongArray == null) {
738         int bytesNeeded = offset + BYTES_PER_LSN_ENTRY;
739         int currentBytes = entryLsnByteArray.length;
740         if (currentBytes < bytesNeeded) {
741         int newBytes = bytesNeeded +
742             (GROWTH_INCREMENT * BYTES_PER_LSN_ENTRY);
743         byte[] newArr = new byte[newBytes];
744         System.arraycopy(entryLsnByteArray, 0, newArr, 0,
745                  currentBytes);
746         entryLsnByteArray = newArr;
747         for (int i = currentBytes;
748              i < newBytes;
749              i += BYTES_PER_LSN_ENTRY) {
750             setFileNumberOffset(i, (byte) 0);
751             setFileOffset(i, -1);
752         }
753         }
754     } else {
755         int currentEntries = entryLsnLongArray.length;
756         int idx = offset >> 2;
757         if (currentEntries < idx + 1) {
758         int newEntries = idx + GROWTH_INCREMENT;
759         long[] newArr = new long[newEntries];
760         System.arraycopy(entryLsnLongArray, 0, newArr, 0,
761                  currentEntries);
762         entryLsnLongArray = newArr;
763         for (int i = currentEntries; i < newEntries; i++) {
764             entryLsnLongArray[i] = DbLsn.NULL_LSN;
765         }
766         }
767     }
768     }
769     ***/

770
771     private boolean adjustFileNumbers(long newBaseFileNumber) {
772     long oldBaseFileNumber = baseFileNumber;
773     for (int i = 0;
774          i < entryLsnByteArray.length;
775          i += BYTES_PER_LSN_ENTRY) {
776         if (getFileOffset(i) == -1) {
777         continue;
778         }
779
780         long curEntryFileNumber =
781         oldBaseFileNumber + getFileNumberOffset(i);
782         long newCurEntryFileNumberOffset =
783         (curEntryFileNumber - newBaseFileNumber);
784         if (newCurEntryFileNumberOffset > Byte.MAX_VALUE) {
785         long undoOffset = oldBaseFileNumber - newBaseFileNumber;
786         for (int j = i - BYTES_PER_LSN_ENTRY;
787              j >= 0;
788              j -= BYTES_PER_LSN_ENTRY) {
789             if (getFileOffset(j) == -1) {
790             continue;
791             }
792             setFileNumberOffset
793             (j, (byte) (getFileNumberOffset(j) - undoOffset));
794             //assert getFileNumberOffset(j) >= 0;
795         }
796         return false;
797         }
798         setFileNumberOffset(i, (byte) newCurEntryFileNumberOffset);
799
800         //assert getFileNumberOffset(i) >= 0;
801     }
802     return true;
803     }
804
805     private void setFileNumberOffset(int offset, byte fileNumberOffset) {
806     entryLsnByteArray[offset] = fileNumberOffset;
807     }
808
809     private byte getFileNumberOffset(int offset) {
810     return entryLsnByteArray[offset];
811     }
812
813     private void setFileOffset(int offset, int fileOffset) {
814     put3ByteInt(offset + 1, fileOffset);
815     }
816
817     private int getFileOffset(int offset) {
818     return get3ByteInt(offset + 1);
819     }
820
821     private void put3ByteInt(int offset, int value) {
822     entryLsnByteArray[offset++] = (byte) (value >>> 0);
823     entryLsnByteArray[offset++] = (byte) (value >>> 8);
824     entryLsnByteArray[offset] = (byte) (value >>> 16);
825     }
826
827     private int get3ByteInt(int offset) {
828         int ret = (entryLsnByteArray[offset++] & 0xFF) << 0;
829     ret += (entryLsnByteArray[offset++] & 0xFF) << 8;
830     ret += (entryLsnByteArray[offset] & 0xFF) << 16;
831     if (ret == THREE_BYTE_NEGATIVE_ONE) {
832         ret = -1;
833     }
834
835     return ret;
836     }
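    /*
     * Illustrative sketch (hypothetical helper, not part of the original
     * class): packs a 24-bit value into three bytes of a caller-supplied
     * buffer and reads it back, mirroring the put3ByteInt/get3ByteInt pair
     * above so the encoding can be exercised in isolation.
     */
    private static int demoRoundTrip3ByteInt(byte[] buf, int offset, int value) {
    /* Store low byte first, matching put3ByteInt. */
    buf[offset]     = (byte) (value >>> 0);
    buf[offset + 1] = (byte) (value >>> 8);
    buf[offset + 2] = (byte) (value >>> 16);
    /* Reassemble, matching get3ByteInt. */
    int ret = (buf[offset] & 0xFF)
        | ((buf[offset + 1] & 0xFF) << 8)
        | ((buf[offset + 2] & 0xFF) << 16);
    /* 0xffffff is the reserved encoding for a null (-1) file offset. */
    return (ret == THREE_BYTE_NEGATIVE_ONE) ? -1 : ret;
    }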
837
838     /**
839      * @return true if the idx'th entry has been deleted, although the
840      * transaction that performed the deletion may not be committed.
841      */

842     public boolean isEntryPendingDeleted(int idx) {
843         return ((entryStates[idx] & PENDING_DELETED_BIT) != 0);
844     }
845
846     /**
847      * Set pendingDeleted to true.
848      */

849     public void setPendingDeleted(int idx) {
850         entryStates[idx] |= PENDING_DELETED_BIT;
851         entryStates[idx] |= DIRTY_BIT;
852     }
853
854     /**
855      * Set pendingDeleted to false.
856      */

857     public void clearPendingDeleted(int idx) {
858         entryStates[idx] &= CLEAR_PENDING_DELETED_BIT;
859         entryStates[idx] |= DIRTY_BIT;
860     }
861
862     /**
863      * @return true if the idx'th entry is deleted for sure. If a transaction
864      * performed the deletion, it has been committed.
865      */

866     public boolean isEntryKnownDeleted(int idx) {
867         return ((entryStates[idx] & KNOWN_DELETED_BIT) != 0);
868     }
869
870     /**
871      * Set knownDeleted to true.
872      */

873     void setKnownDeleted(int idx) {
874         entryStates[idx] |= KNOWN_DELETED_BIT;
875         entryStates[idx] |= DIRTY_BIT;
876     }
877
878     /**
879      * Set knownDeleted to false.
880      */

881     void clearKnownDeleted(int idx) {
882         entryStates[idx] &= CLEAR_KNOWN_DELETED_BIT;
883         entryStates[idx] |= DIRTY_BIT;
884     }
885
886     /**
887      * @return true if the object is dirty.
888      */

889     boolean isDirty(int idx) {
890         return ((entryStates[idx] & DIRTY_BIT) != 0);
891     }
892
893     /**
894      * @return the number of entries in this node.
895      */

896     public int getNEntries() {
897         return nEntries;
898     }
899
900     /*
901      * In the future we may want to move the following static methods to an
902      * EntryState utility class and share all state bit twiddling among IN,
903      * ChildReference, and DeltaInfo.
904      */

905
906     /**
907      * Returns true if the given state is known deleted.
908      */

909     static boolean isStateKnownDeleted(byte state) {
910         return ((state & KNOWN_DELETED_BIT) != 0);
911     }
912
913     /**
914      * Returns true if the given state is pending deleted.
915      */

916     static boolean isStatePendingDeleted(byte state) {
917         return ((state & PENDING_DELETED_BIT) != 0);
918     }
919
920     /**
921      * @return the maximum number of entries in this node.
922      */

923     int getMaxEntries() {
924         return entryTargets.length;
925     }
926
927     /**
928      * Returns the target of the idx'th entry or null if a pendingDeleted or
929      * knownDeleted entry has been cleaned. Note that null can only be
930      * returned for a slot that could contain a deleted LN, not other node
931      * types and not a DupCountLN since DupCountLNs are never deleted. Null is
932      * also returned for a KnownDeleted slot with a NULL_LSN.
933      *
934      * @return the target node or null.
935      */

936     public Node fetchTarget(int idx)
937         throws DatabaseException {
938
939         if (entryTargets[idx] == null) {
940             /* Fault object in from log. */
941             long lsn = getLsn(idx);
942             if (lsn == DbLsn.NULL_LSN) {
943                 if (!isEntryKnownDeleted(idx)) {
944                     throw new DatabaseException(makeFetchErrorMsg
945                         ("NULL_LSN without KnownDeleted", this, lsn,
946                          entryStates[idx]));
947                 }
948
949                 /*
950                  * Ignore a NULL_LSN (return null) if KnownDeleted is set.
951                  * This is the remnant of an incomplete insertion -- see
952                  * Tree.insert. [#13126]
953                  */

954             } else {
955                 try {
956                     EnvironmentImpl env = databaseImpl.getDbEnvironment();
957                     Node node = (Node) env.getLogManager().get(lsn);
958                     node.postFetchInit(databaseImpl, lsn);
959             assert isLatchOwnerForWrite();
960                     entryTargets[idx] = node;
961                     updateMemorySize(null, node);
962                 } catch (LogFileNotFoundException LNFE) {
963                     if (!isEntryKnownDeleted(idx) &&
964                         !isEntryPendingDeleted(idx)) {
965                         throw new DatabaseException
966                             (makeFetchErrorMsg(LNFE.toString(),
967                                                this,
968                                                lsn,
969                                                entryStates[idx]));
970                     }
971
972                     /*
973                      * Ignore. Cleaner got to the log file, so just return
974                      * null. It is safe to ignore a deleted file for a
975                      * pendingDeleted entry because the cleaner will not clean
976                      * files with active transactions.
977                      */

978                 } catch (Exception e) {
979                     throw new DatabaseException
980                         (makeFetchErrorMsg(e.toString(), this, lsn,
981                                            entryStates[idx]),
982                          e);
983                 }
984             }
985         }
986
987         return entryTargets[idx];
988     }
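    /*
     * Usage sketch (illustrative simplification, assuming the IN is latched):
     * tree descent typically locates a slot and then faults the child in
     * through its parent, e.g.
     *
     *   int idx = in.findEntry(searchKey, false, false);
     *   Node child = in.fetchTarget(idx);  // may read the node from the log
     *
     * A null return is only possible for a deleted slot (NULL_LSN with
     * KnownDeleted, or a cleaned log file), as described above.
     */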
989
990     static String makeFetchErrorMsg(String msg, IN in, long lsn, byte state) {
991
992         /*
993          * Bolster the exception with the LSN, which is critical for
994          * debugging. Otherwise, the exception propagates upward and loses the
995          * problem LSN.
996          */

997         StringBuffer sb = new StringBuffer();
998         sb.append("fetchTarget of ");
999         if (lsn == DbLsn.NULL_LSN) {
1000            sb.append("null lsn");
1001        } else {
1002            sb.append(DbLsn.getNoFormatString(lsn));
1003        }
1004        if (in != null) {
1005            sb.append(" parent IN=").append(in.getNodeId());
1006            sb.append(" lastFullVersion=");
1007            sb.append(DbLsn.getNoFormatString(in.getLastFullVersion()));
1008            sb.append(" parent.getDirty()=").append(in.getDirty());
1009        }
1010        sb.append(" state=").append(state);
1011        sb.append(" ").append(msg);
1012        return sb.toString();
1013    }
1014
1015    /*
1016     * All methods that modify the entry array must adjust memory sizing.
1017     */

1018
1019    /**
1020     * Set the idx'th entry of this node.
1021     */

1022    public void setEntry(int idx,
1023             Node target,
1024             byte[] keyVal,
1025                         long lsn,
1026             byte state) {
1027        
1028    long oldSize = getEntryInMemorySize(idx);
1029    int newNEntries = idx + 1;
1030        if (newNEntries > nEntries) {
1031
1032        /*
1033         * If the new entry is going to bump nEntries, then we don't need
1034         * the size and LSN accounting included in oldSize.
1035         */

1036            nEntries = newNEntries;
1037        oldSize = 0;
1038        }
1039    entryTargets[idx] = target;
1040    entryKeyVals[idx] = keyVal;
1041    setLsnElement(idx, lsn);
1042    entryStates[idx] = state;
1043    long newSize = getEntryInMemorySize(idx);
1044    updateMemorySize(oldSize, newSize);
1045        setDirty(true);
1046    }
1047
1048    /**
1049     * Update the idx'th entry of this node.
1050     *
1051     * Note: does not dirty the node.
1052     */

1053    public void updateEntry(int idx, Node node) {
1054    long oldSize = getEntryInMemorySize(idx);
1055    setTarget(idx, node);
1056    long newSize = getEntryInMemorySize(idx);
1057        updateMemorySize(oldSize, newSize);
1058    }
1059
1060    /**
1061     * Update the idx'th entry of this node.
1062     */

1063    public void updateEntry(int idx, Node node, long lsn) {
1064    long oldSize = getEntryInMemorySize(idx);
1065        if (notOverwritingDeferredWriteEntry(lsn)) {
1066            setLsn(idx, lsn);
1067        }
1068    setTarget(idx, node);
1069    long newSize = getEntryInMemorySize(idx);
1070        updateMemorySize(oldSize, newSize);
1071        setDirty(true);
1072    }
1073
1074    /**
1075     * Update the idx'th entry of this node.
1076     */

1077    public void updateEntry(int idx, Node node, long lsn, byte[] key) {
1078    long oldSize = getEntryInMemorySize(idx);
1079        if (notOverwritingDeferredWriteEntry(lsn)) {
1080            setLsn(idx, lsn);
1081        }
1082    setTarget(idx, node);
1083    setKey(idx, key);
1084    long newSize = getEntryInMemorySize(idx);
1085        updateMemorySize(oldSize, newSize);
1086        setDirty(true);
1087    }
1088
1089    /**
1090     * Update the idx'th entry of this node.
1091     */

1092    public void updateEntry(int idx, long lsn) {
1093        if (notOverwritingDeferredWriteEntry(lsn)) {
1094            setLsn(idx, lsn);
1095        }
1096        setDirty(true);
1097    }
1098
1099    /**
1100     * Update the idx'th entry of this node.
1101     */

1102    public void updateEntry(int idx, long lsn, byte state) {
1103        if (notOverwritingDeferredWriteEntry(lsn)) {
1104            setLsn(idx, lsn);
1105        }
1106    entryStates[idx] = state;
1107        setDirty(true);
1108    }
1109
1110    /**
1111     * Update the idx'th entry of this node. This flavor is used when the
1112     * target LN is being modified, by an operation like a delete or update. We
1113     * don't have to check whether the LSN has been nulled or not, because we
1114     * know an LSN existed before. Also, the modification of the target is done
1115     * in the caller, so instead of passing in the old and new nodes, we pass
1116     * in the old and new node sizes.
1117     */

1118    public void updateEntry(int idx,
1119                long lsn,
1120                long oldLNSize,
1121                long newLNSize) {
1122        updateMemorySize(oldLNSize, newLNSize);
1123        if (notOverwritingDeferredWriteEntry(lsn)) {
1124            setLsn(idx, lsn);
1125        }
1126        setDirty(true);
1127    }
1128
1129    /**
1130     * Update the idx'th entry of this node. Only update the key if the new
1131     * key is less than the existing key.
1132     */

1133    private void updateEntryCompareKey(int idx,
1134                       Node node,
1135                       long lsn,
1136                       byte[] key) {
1137    long oldSize = getEntryInMemorySize(idx);
1138        if (notOverwritingDeferredWriteEntry(lsn)) {
1139            setLsn(idx, lsn);
1140        }
1141    setTarget(idx, node);
1142    byte[] existingKey = getKey(idx);
1143    int s = Key.compareKeys(key, existingKey, getKeyComparator());
1144    if (s < 0) {
1145        setKey(idx, key);
1146    }
1147    long newSize = getEntryInMemorySize(idx);
1148        updateMemorySize(oldSize, newSize);
1149        setDirty(true);
1150    }
1151
1152    /**
1153     * When a deferred write database calls one of the optionalLog methods,
1154     * it may receive a DbLsn.NULL_LSN as the return value, because the
1155     * logging didn't really happen. A NULL_LSN should never overwrite a
1156     * valid lsn (that resulted from Database.sync() or eviction), lest
1157     * we lose the handle to the last on disk version.
1158     */

1159    boolean notOverwritingDeferredWriteEntry(long newLsn) {
1160        if (databaseImpl.isDeferredWrite() &&
1161            (newLsn == DbLsn.NULL_LSN)) {
1162            return false;
1163        } else
1164            return true;
1165    }
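    /*
     * Example (illustrative): for a deferred-write database, a skipped
     * optionalLog call yields DbLsn.NULL_LSN; the guard above keeps a slot's
     * previously valid LSN (from Database.sync() or eviction) rather than
     * overwriting it with NULL_LSN and losing the last on-disk version.
     */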
1166
1167    /*
1168     * Memory usage calculations.
1169     */

1170    public boolean verifyMemorySize() {
1171
1172        long calcMemorySize = computeMemorySize();
1173        if (calcMemorySize != inMemorySize) {
1174
1175            String msg = "-Warning: Out of sync. " +
1176                "Should be " + calcMemorySize +
1177                " / actual: " +
1178                inMemorySize + " node: " + getNodeId();
1179            Tracer.trace(Level.INFO,
1180                         databaseImpl.getDbEnvironment(),
1181                         msg);
1182                         
1183            System.out.println(msg);
1184
1185            return false;
1186        } else {
1187            return true;
1188        }
1189    }
1190
1191    /**
1192     * Return the number of bytes used by this IN. Latching is up to the
1193     * caller.
1194     */

1195    public long getInMemorySize() {
1196        return inMemorySize;
1197    }
1198
1199    private long getEntryInMemorySize(int idx) {
1200    return getEntryInMemorySize(entryKeyVals[idx],
1201                                    entryTargets[idx]);
1202    }
1203
1204    protected long getEntryInMemorySize(byte[] key, Node target) {
1205
1206        /*
1207         * Do not count state size here, since it is counted as overhead
1208         * during initialization.
1209         */

1210    long ret = 0;
1211    if (key != null) {
1212            ret += MemoryBudget.byteArraySize(key.length);
1213    }
1214    if (target != null) {
1215        ret += target.getMemorySizeIncludedByParent();
1216    }
1217    return ret;
1218    }
1219
1220    /**
1221     * Count up the memory usage attributable to this node alone. LNs children
1222     * are counted by their BIN/DIN parents, but INs are not counted by their
1223     * parents because they are resident on the IN list.
1224     */

1225    protected long computeMemorySize() {
1226        MemoryBudget mb = databaseImpl.getDbEnvironment().getMemoryBudget();
1227        long calcMemorySize = getMemoryOverhead(mb);
1228    calcMemorySize += computeLsnOverhead();
1229        for (int i = 0; i < nEntries; i++) {
1230            calcMemorySize += getEntryInMemorySize(i);
1231        }
1232        /* XXX Need to update size when changing the identifierKey.
1233           if (identifierKey != null) {
1234           calcMemorySize +=
1235           MemoryBudget.byteArraySize(identifierKey.length);
1236           }
1237        */

1238
1239        if (provisionalObsolete != null) {
1240            calcMemorySize += provisionalObsolete.size() *
1241                              MemoryBudget.LONG_LIST_PER_ITEM_OVERHEAD;
1242        }
1243
1244        return calcMemorySize;
1245    }
1246
1247    /* Called once at environment startup by MemoryBudget. */
1248    public static long computeOverhead(DbConfigManager configManager)
1249        throws DatabaseException {
1250
1251        /*
1252     * Overhead consists of all the fields in this class plus the
1253     * entry arrays in the IN class.
1254         */

1255        return MemoryBudget.IN_FIXED_OVERHEAD +
1256            IN.computeArraysOverhead(configManager);
1257    }
1258
1259    private int computeLsnOverhead() {
1260    if (entryLsnLongArray == null) {
1261        return MemoryBudget.byteArraySize(entryLsnByteArray.length);
1262    } else {
1263        return MemoryBudget.BYTE_ARRAY_OVERHEAD +
1264        entryLsnLongArray.length * MemoryBudget.LONG_OVERHEAD;
1265    }
1266    }
1267
1268    protected static long computeArraysOverhead(DbConfigManager configManager)
1269        throws DatabaseException {
1270
1271        /* Count three array elements: states, Keys, and Nodes */
1272        int capacity = configManager.getInt(EnvironmentParams.NODE_MAX);
1273        return
1274            MemoryBudget.byteArraySize(capacity) + // state array
1275            (capacity *
1276         (2 * MemoryBudget.ARRAY_ITEM_OVERHEAD)); // keys + nodes
1277    }
1278    
1279    /* Overridden by subclasses. */
1280    protected long getMemoryOverhead(MemoryBudget mb) {
1281        return mb.getINOverhead();
1282    }
1283
1284    protected void updateMemorySize(ChildReference oldRef,
1285                    ChildReference newRef) {
1286        long delta = 0;
1287        if (newRef != null) {
1288            delta = getEntryInMemorySize(newRef.getKey(), newRef.getTarget());
1289        }
1290
1291        if (oldRef != null) {
1292            delta -= getEntryInMemorySize(oldRef.getKey(), oldRef.getTarget());
1293        }
1294        changeMemorySize(delta);
1295    }
1296
1297    protected void updateMemorySize(long oldSize, long newSize) {
1298        long delta = newSize - oldSize;
1299        changeMemorySize(delta);
1300    }
1301
1302    void updateMemorySize(Node oldNode, Node newNode) {
1303        long delta = 0;
1304        if (newNode != null) {
1305            delta = newNode.getMemorySizeIncludedByParent();
1306        }
1307
1308        if (oldNode != null) {
1309            delta -= oldNode.getMemorySizeIncludedByParent();
1310        }
1311        changeMemorySize(delta);
1312    }
1313
1314    private void changeMemorySize(long delta) {
1315        inMemorySize += delta;
1316
1317        /*
1318         * Only update the environment cache usage stats if this IN is actually
1319         * on the IN list. For example, when we create new INs, they are
1320         * manipulated off the IN list before being added; if we updated the
1321         * environment wide cache then, we'd end up double counting.
1322         */

1323        if (inListResident) {
1324            MemoryBudget mb =
1325                databaseImpl.getDbEnvironment().getMemoryBudget();
1326
1327        accumulatedDelta += delta;
1328        if (accumulatedDelta > ACCUMULATED_LIMIT ||
1329        accumulatedDelta < -ACCUMULATED_LIMIT) {
1330        mb.updateTreeMemoryUsage(accumulatedDelta);
1331        accumulatedDelta = 0;
1332        }
1333        }
1334    }
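    /*
     * Example (illustrative): with the default ACCUMULATED_LIMIT of 1000, a
     * resident IN that absorbs deltas of +600 and then +500 adjusts only its
     * own inMemorySize on the first call; the second call pushes
     * accumulatedDelta to 1100, which exceeds the limit, so
     * updateTreeMemoryUsage(1100) runs once and the accumulator resets to 0.
     */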
1335
1336    public int getAccumulatedDelta() {
1337    return accumulatedDelta;
1338    }
1339
1340    public void setInListResident(boolean resident) {
1341        inListResident = resident;
1342    }
1343
1344    /**
1345     * Returns whether the given key is greater than or equal to the first key
1346     * in the IN and less than or equal to the last key in the IN. This method
1347     * is used to determine whether a key to be inserted belongs in this IN,
1348     * without doing a tree search. If false is returned it is still possible
1349     * that the key belongs in this IN, but a tree search must be performed to
1350     * find out.
1351     */

1352    public boolean isKeyInBounds(byte[] keyVal) {
1353
1354        if (nEntries < 2) {
1355            return false;
1356        }
1357
1358        Comparator userCompareToFcn = getKeyComparator();
1359        int cmp;
1360        byte[] myKey;
1361
1362        /* Compare key given to my first key. */
1363        myKey = entryKeyVals[0];
1364        cmp = Key.compareKeys(keyVal, myKey, userCompareToFcn);
1365        if (cmp < 0) {
1366            return false;
1367        }
1368
1369        /* Compare key given to my last key. */
1370        myKey = entryKeyVals[nEntries - 1];
1371        cmp = Key.compareKeys(keyVal, myKey, userCompareToFcn);
1372        if (cmp > 0) {
1373            return false;
1374        }
1375
1376        return true;
1377    }
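    /*
     * Example (illustrative, using integers as shorthand for byte[] keys): if
     * this IN holds keys [10, 20, 30], isKeyInBounds(25) returns true, so an
     * insertion of 25 is known to belong under this IN without a tree search;
     * isKeyInBounds(35) returns false even though 35 may still belong here,
     * so the caller must fall back to a full tree search.
     */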
1378
1379    /**
1380     * Find the entry in this IN for which key arg is >= the key.
1381     *
1382     * Currently uses a binary search, but eventually, this may use binary or
1383     * linear search depending on key size, number of entries, etc.
1384     *
1385     * Note that the 0'th entry's key is treated specially in an IN. It always
1386     * compares lower than any other key.
1387     *
1388     * This is public so that DbCursorTest can access it.
1389     *
1390     * @param key - the key to search for.
1391     * @param indicateIfDuplicate - true if EXACT_MATCH should
1392     * be or'd onto the return value if key is already present in this node.
1393     * @param exact - true if an exact match must be found.
1394     * @return offset for the entry that has a key >= the arg. 0 if key
1395     * is less than the 1st entry. -1 if exact is true and no exact match
1396     * is found. If indicateIfDuplicate is true and an exact match was found
1397     * then EXACT_MATCH is or'd onto the return value.
1398     */

1399    public int findEntry(byte[] key,
1400                         boolean indicateIfDuplicate,
1401                         boolean exact) {
1402        int high = nEntries - 1;
1403        int low = 0;
1404        int middle = 0;
1405
1406        Comparator userCompareToFcn = getKeyComparator();
1407
1408        /*
1409         * INs are special in that their entry[0] key is a virtual key that
1410         * always compares lower than any other key. BINs don't treat key[0]
1411         * specially. But if the caller asked for an
1412         * exact match or to indicate duplicates, then use the key[0] and
1413         * forget about the special entry zero comparison.
1414         */

1415        boolean entryZeroSpecialCompare =
1416            entryZeroKeyComparesLow() && !exact && !indicateIfDuplicate;
1417
1418        assert nEntries >= 0;
1419        
1420        while (low <= high) {
1421            middle = (high + low) / 2;
1422            int s;
1423            byte[] middleKey = null;
1424            if (middle == 0 && entryZeroSpecialCompare) {
1425                s = 1;
1426            } else {
1427                middleKey = entryKeyVals[middle];
1428                s = Key.compareKeys(key, middleKey, userCompareToFcn);
1429            }
1430            if (s < 0) {
1431                high = middle - 1;
1432            } else if (s > 0) {
1433                low = middle + 1;
1434            } else {
1435                int ret;
1436                if (indicateIfDuplicate) {
1437                    ret = middle | EXACT_MATCH;
1438                } else {
1439                    ret = middle;
1440                }
1441
1442                if ((ret >= 0) && exact && isEntryKnownDeleted(ret & 0xffff)) {
1443                    return -1;
1444                } else {
1445                    return ret;
1446                }
1447            }
1448        }
1449
1450        /*
1451     * No match found. Either return -1 if caller wanted exact matches
1452     * only, or return entry for which arg key is > entry key.
1453     */

1454        if (exact) {
1455            return -1;
1456        } else {
1457            return high;
1458        }
1459    }
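    /*
     * Usage sketch (illustrative simplification of what insertEntry1 below
     * does): callers decode the return value roughly as
     *
     *   int index = in.findEntry(key, true, false);
     *   if (index >= 0 && (index & EXACT_MATCH) != 0) {
     *       int slot = index & ~EXACT_MATCH;  // key already present here
     *   } else {
     *       int insertAt = index + 1;         // insert to the right of index
     *   }
     */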
1460
1461    /**
1462     * Inserts the argument ChildReference into this IN. Assumes this node is
1463     * already latched by the caller.
1464     *
1465     * @param entry The ChildReference to insert into the IN.
1466     *
1467     * @return true if the entry was successfully inserted, false
1468     * if it was a duplicate.
1469     *
1470     * @throws InconsistentNodeException if the node is full
1471     * (it should have been split earlier).
1472     */

1473    public boolean insertEntry(ChildReference entry)
1474        throws DatabaseException {
1475
1476        return (insertEntry1(entry) & INSERT_SUCCESS) != 0;
1477    }
1478
1479    /**
1480     * Same as insertEntry except that it returns the index where the dup was
1481     * found instead of false. The return value is |'d with either
1482     * INSERT_SUCCESS or EXACT_MATCH depending on whether the entry was
1483     * inserted or it was a duplicate, resp.
1484     *
1485     * This returns a failure if there's a duplicate match. The caller must do
1486     * the processing to check if the entry is actually deleted and can be
1487     * overwritten. This is foisted upon the caller rather than handled in this
1488     * object because there may be some latch releasing/retaking in order to
1489     * check a child LN.
1490     *
1491     * Inserts the argument ChildReference into this IN. Assumes this node is
1492     * already latched by the caller.
1493     *
1494     * @param entry The ChildReference to insert into the IN.
1495     *
1496     * @return either (1) the index of location in the IN where the entry was
1497     * inserted |'d with INSERT_SUCCESS, or (2) the index of the duplicate in
1498     * the IN |'d with EXACT_MATCH if the entry was found to be a duplicate.
1499     *
1500     * @throws InconsistentNodeException if the node is full (it should have
1501     * been split earlier).
1502     */

1503    public int insertEntry1(ChildReference entry)
1504        throws DatabaseException {
1505
1506    if (nEntries >= entryTargets.length) {
1507        compress(null, true);
1508    }
1509
1510    if (nEntries < entryTargets.length) {
1511        byte[] key = entry.getKey();
1512
1513        /*
1514         * Search without requiring an exact match, but do let us know the
1515         * index of the match if there is one.
1516         */

1517        int index = findEntry(key, true, false);
1518
1519        if (index >= 0 && (index & EXACT_MATCH) != 0) {
1520
1521        /*
1522         * There is an exact match. Don't insert; let the caller
1523         * decide what to do with this duplicate.
1524         */

1525        return index;
1526        } else {
1527
1528        /*
1529         * There was no key match, so insert to the right of this
1530         * entry.
1531         */

1532        index++;
1533        }
1534
1535        /* We found a spot for insert, shift entries as needed. */
1536        if (index < nEntries) {
1537        int oldSize = computeLsnOverhead();
1538
1539        /*
1540         * Adding elements to the LSN array can change the space used.
1541         */

1542        shiftEntriesRight(index);
1543        changeMemorySize(computeLsnOverhead() - oldSize);
1544        }
1545        entryTargets[index] = entry.getTarget();
1546        entryKeyVals[index] = entry.getKey();
1547        setLsnElement(index, entry.getLsn());
1548        entryStates[index] = entry.getState();
1549        nEntries++;
1550        adjustCursorsForInsert(index);
1551        updateMemorySize(0, getEntryInMemorySize(index));
1552        setDirty(true);
1553        return (index | INSERT_SUCCESS);
1554    } else {
1555        throw new InconsistentNodeException
1556        ("Node " + getNodeId() +
1557         " should have been split before calling insertEntry");
1558    }
1559    }
1560
1561    /**
1562     * Deletes the ChildReference with the key arg from this IN. Assumes this
1563     * node is already latched by the caller.
1564     *
1565     * This seems to only be used by INTest.
1566     *
1567     * @param key The key of the reference to delete from the IN.
1568     *
1569     * @param maybeValidate true if assert validation should occur prior to
1570     * delete. Set this to false during recovery.
1571     *
1572     * @return true if the entry was successfully deleted, false if it was not
1573     * found.
1574     */

1575    boolean deleteEntry(byte[] key, boolean maybeValidate)
1576        throws DatabaseException {
1577
1578        if (nEntries == 0) {
1579            return false; // caller should put this node on the IN cleaner list
1580        }
1581
1582        int index = findEntry(key, false, true);
1583        if (index < 0) {
1584            return false;
1585        }
1586
1587        return deleteEntry(index, maybeValidate);
1588    }
1589
1590    /**
1591     * Deletes the ChildReference at index from this IN. Assumes this node is
1592     * already latched by the caller.
1593     *
1594     * @param index The index of the entry to delete from the IN.
1595     *
1596     * @param maybeValidate true if asserts are enabled.
1597     *
1598     * @return true if the entry was successfully deleted, false if it was not
1599     * found.
1600     */

1601    public boolean deleteEntry(int index, boolean maybeValidate)
1602        throws DatabaseException {
1603
1604    if (nEntries == 0) {
1605        return false;
1606    }
1607
1608        /* Check the subtree validation only if maybeValidate is true. */
1609        assert maybeValidate ?
1610            validateSubtreeBeforeDelete(index) :
1611            true;
1612
1613    if (index < nEntries) {
1614        updateMemorySize(getEntryInMemorySize(index), 0);
1615        int oldLSNArraySize = computeLsnOverhead();
1616        /* LSNArray.setElement can mutate to an array of longs. */
1617        for (int i = index; i < nEntries - 1; i++) {
1618        setEntryInternal(i + 1, i);
1619        }
1620        clearEntry(nEntries - 1);
1621        updateMemorySize(oldLSNArraySize, computeLsnOverhead());
1622        nEntries--;
1623        setDirty(true);
1624        setProhibitNextDelta();
1625
1626        /*
1627         * Note that we don't have to adjust cursors for delete, since
1628         * there should be nothing pointing at this record.
1629         */

1630        traceDelete(Level.FINEST, index);
1631        return true;
1632    } else {
1633        return false;
1634    }
1635    }
1636
1637    /**
1638     * Do nothing since INs don't support deltas.
1639     */

1640    public void setProhibitNextDelta() {
1641    }
1642
1643    /* Called by the incompressor. */
1644    public boolean compress(BINReference binRef, boolean canFetch)
1645        throws DatabaseException {
1646
1647    return false;
1648    }
1649
1650    public boolean isCompressible() {
1651        return false;
1652    }
1653
1654    /*
1655     * Validate the subtree that we're about to delete. Make sure there is no
1656     * more than one valid entry on each IN and that the last level of the tree
1657     * is empty. Also check that there are no cursors on any BINs in this
1658     * subtree. Assumes the caller is holding the latch on this parent node.
1659     *
1660     * While we could latch-couple down the tree rather than hold latches as
1661     * we descend, we are presumably about to delete this subtree, so
1662     * concurrency shouldn't be an issue.
1663     *
1664     * @return true if the subtree rooted at the entry specified by "index" is
1665     * ok to delete.
1666     */

1667    boolean validateSubtreeBeforeDelete(int index)
1668        throws DatabaseException {
1669        
1670    if (index >= nEntries) {
1671
1672        /*
1673         * There's no entry here, so of course this entry is deletable.
1674         */

1675        return true;
1676    } else {
1677        Node child = fetchTarget(index);
1678        return child != null && child.isValidForDelete();
1679    }
1680    }
1681
1682    /**
1683     * Return true if this node needs splitting. For the moment, needing to be
1684     * split is defined by there being no free entries available.
1685     */

1686    public boolean needsSplitting() {
1687        if ((entryTargets.length - nEntries) < 1) {
1688            return true;
1689        } else {
1690            return false;
1691        }
1692    }
1693
1694    /**
1695     * Indicates whether entry 0's key is "special" in that it always
1696     * compares less than any other key. BINs don't have the special key, but
1697     * INs do.
1698     */

1699    boolean entryZeroKeyComparesLow() {
1700        return true;
1701    }
1702
1703    /**
1704     * Split this node into two nodes. The parent IN is passed in as "parent"
1705     * and should already be latched by the caller.
1706     *
1707     * childIndex is the index in parent at which "this" can be found. The new
1708     * sibling, this node, and the parent are all logged as part of the split.
1709     */

1710    void split(IN parent, int childIndex, int maxEntries)
1711        throws DatabaseException {
1712
1713        splitInternal(parent, childIndex, maxEntries, -1);
1714    }
1715
1716    protected void splitInternal(IN parent,
1717                 int childIndex,
1718                 int maxEntries,
1719                 int splitIndex)
1720        throws DatabaseException {
1721
1722        /*
1723         * Find the index of the existing identifierKey so we know which IN
1724         * (new or old) to put it in.
1725         */

1726        if (identifierKey == null) {
1727            throw new InconsistentNodeException("idkey is null");
1728        }
1729        int idKeyIndex = findEntry(identifierKey, false, false);
1730
1731    if (splitIndex < 0) {
1732        splitIndex = nEntries / 2;
1733    }
1734
1735        int low, high;
1736        IN newSibling = null;
1737
1738        if (idKeyIndex < splitIndex) {
1739
1740            /*
1741             * Current node (this) keeps left half entries. Right half entries
1742             * will go in the new node.
1743             */

1744            low = splitIndex;
1745            high = nEntries;
1746        } else {
1747
1748            /*
1749         * Current node (this) keeps right half entries. Left half entries
1750         * and entry[0] will go in the new node.
1751         */

1752            low = 0;
1753            high = splitIndex;
1754        }
1755
1756        byte[] newIdKey = entryKeyVals[low];
1757    long parentLsn = DbLsn.NULL_LSN;
1758
1759        newSibling = createNewInstance(newIdKey, maxEntries, level);
1760        newSibling.latch();
1761        long oldMemorySize = inMemorySize;
1762    try {
1763        
1764        int toIdx = 0;
1765        boolean deletedEntrySeen = false;
1766        BINReference binRef = null;
1767        for (int i = low; i < high; i++) {
1768        byte[] thisKey = entryKeyVals[i];
1769        if (isEntryPendingDeleted(i)) {
1770            if (!deletedEntrySeen) {
1771            deletedEntrySeen = true;
1772            binRef = new BINReference(newSibling.getNodeId(),
1773                          databaseImpl.getId(),
1774                          newIdKey);
1775            }
1776            binRef.addDeletedKey(new Key(thisKey));
1777        }
1778        newSibling.setEntry(toIdx++,
1779                    entryTargets[i],
1780                    thisKey,
1781                    getLsn(i),
1782                    entryStates[i]);
1783                clearEntry(i);
1784        }
1785
1786        if (deletedEntrySeen) {
1787        databaseImpl.getDbEnvironment().
1788            addToCompressorQueue(binRef, false);
1789        }
1790
1791        int newSiblingNEntries = (high - low);
1792
1793        /*
1794         * Remove the entries that we just copied into newSibling from this
1795         * node.
1796         */

1797        if (low == 0) {
1798        shiftEntriesLeft(newSiblingNEntries);
1799        }
1800
1801        newSibling.nEntries = toIdx;
1802        nEntries -= newSiblingNEntries;
1803        setDirty(true);
1804
1805        adjustCursors(newSibling, low, high);
1806
1807        /*
1808         * Parent refers to child through an element of the entries array.
1809         * Depending on which half of the BIN we copied keys from, we
1810         * either have to adjust one pointer and add a new one, or we have
1811         * to just add a new pointer to the new sibling.
1812         *
1813         * Note that we must use the provisional form of logging because
1814         * all three log entries must be read atomically. The parent must
1815         * get logged last, as all referred-to children must precede
1816         * it. Provisional entries guarantee that all three are processed
1817         * as a unit. Recovery skips provisional entries, so the changed
1818         * children are only used if the parent makes it out to the log.
1819         */

1820        EnvironmentImpl env = databaseImpl.getDbEnvironment();
1821        LogManager logManager = env.getLogManager();
1822        INList inMemoryINs = env.getInMemoryINs();
1823
1824        long newSiblingLsn =
1825                newSibling.optionalLogProvisional(logManager, parent);
1826
1827        long myNewLsn = optionalLogProvisional(logManager, parent);
1828
1829        /*
1830         * When we update the parent entry, we use updateEntryCompareKey so
1831         * that we don't replace the parent's key that points at 'this'
1832         * with a key that is greater than the existing one. Replacing the
1833         * parent's key with something greater would effectively render a piece
1834         * of the subtree inaccessible. So only replace the parent key
1835         * with something <= the existing one. See tree/SplitTest.java for
1836         * more details on the scenario.
1837         */

1838        if (low == 0) {
1839
1840        /*
1841         * Change the original entry to point to the new child and add
1842         * an entry to point to the newly logged version of this
1843         * existing child.
1844         */

1845                if (childIndex == 0) {
1846                    parent.updateEntryCompareKey(childIndex, newSibling,
1847                                                 newSiblingLsn, newIdKey);
1848                } else {
1849                    parent.updateEntry(childIndex, newSibling, newSiblingLsn);
1850                }
1851
1852        boolean insertOk = parent.insertEntry
1853            (new ChildReference(this, entryKeyVals[0], myNewLsn));
1854        assert insertOk;
1855        } else {
1856
1857        /*
1858         * Update the existing child's LSN to reflect the newly logged
1859         * version and insert new child into parent.
1860         */

1861        if (childIndex == 0) {
1862
1863            /*
1864             * This node's idkey may be less than the parent's entry 0, so we
1865             * need to update the parent's entry 0 with the key for 'this'.
1866             */

1867            parent.updateEntryCompareKey
1868            (childIndex, this, myNewLsn, entryKeyVals[0]);
1869        } else {
1870            parent.updateEntry(childIndex, this, myNewLsn);
1871        }
1872        boolean insertOk = parent.insertEntry
1873            (new ChildReference(newSibling, newIdKey, newSiblingLsn));
1874        assert insertOk;
1875        }
1876
1877        parentLsn = parent.optionalLog(logManager);
1878            
1879            /*
1880             * Maintain dirtiness if this is the root, so this parent
1881             * will be checkpointed. Other parents who are not roots
1882             * are logged as part of the propagation of splits
1883             * upwards.
1884             */

1885            if (parent.isRoot()) {
1886                parent.setDirty(true);
1887            }
1888
1889            /*
1890             * Update size. newSibling and parent are correct, but this IN has
1891             * had its entries shifted and is not correct.
1892             */

1893            long newSize = computeMemorySize();
1894            updateMemorySize(oldMemorySize, newSize);
1895        inMemoryINs.add(newSibling);
1896        
1897        /* Debug log this information. */
1898        traceSplit(Level.FINE, parent,
1899               newSibling, parentLsn, myNewLsn,
1900               newSiblingLsn, splitIndex, idKeyIndex, childIndex);
1901    } finally {
1902        newSibling.releaseLatch();
1903    }
1904    }
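    /*
     * Illustrative sketch (not part of the original source): how splitInternal
     * chooses the range [low, high) that moves to the new sibling. If the
     * identifier key lands in the left half, this node keeps the left half and
     * the right half moves out; otherwise the left half (including entry 0)
     * moves out and this node's remaining entries are shifted left.
     *
     *     static int[] chooseMovedRange(int nEntries, int idKeyIndex) {
     *         int splitIndex = nEntries / 2;
     *         if (idKeyIndex < splitIndex) {
     *             return new int[] { splitIndex, nEntries }; // right half moves
     *         } else {
     *             return new int[] { 0, splitIndex };        // left half moves
     *         }
     *     }
     */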
1905
1906    /**
1907     * Called when we know we are about to split on behalf of a key that is the
1908     * minimum (leftSide) or maximum (!leftSide) of this node. This is
1909     * achieved by just forcing the split to occur either one element in from
1910     * the left or the right (i.e. splitIndex is 1 or nEntries - 1).
1911     */

1912    void splitSpecial(IN parent,
1913              int parentIndex,
1914              int maxEntriesPerNode,
1915              byte[] key,
1916              boolean leftSide)
1917    throws DatabaseException {
1918
1919    int index = findEntry(key, false, false);
1920    if (leftSide &&
1921        index == 0) {
1922        splitInternal(parent, parentIndex, maxEntriesPerNode, 1);
1923    } else if (!leftSide &&
1924           index == (nEntries - 1)) {
1925        splitInternal(parent, parentIndex, maxEntriesPerNode,
1926              nEntries - 1);
1927    } else {
1928            split(parent, parentIndex, maxEntriesPerNode);
1929    }
1930    }
1931
1932    void adjustCursors(IN newSibling,
1933                       int newSiblingLow,
1934                       int newSiblingHigh) {
1935        /* Cursors never refer to IN's. */
1936    }
1937
1938    void adjustCursorsForInsert(int insertIndex) {
1939        /* Cursors never refer to IN's. */
1940    }
1941
1942    /**
1943     * Return the relevant user-defined comparison function for this type of
1944     * node. For INs and BINs, this is the Btree comparison function.
1945     */

1946    public Comparator getKeyComparator() {
1947        return databaseImpl.getBtreeComparator();
1948    }
1949
1950    /**
1951     * Shift entries to the right starting with (and including) the entry at
1952     * index. Caller is responsible for incrementing nEntries.
1953     *
1954     * @param index - The position to start shifting from.
1955     */

1956    private void shiftEntriesRight(int index) {
1957        for (int i = nEntries; i > index; i--) {
1958        setEntryInternal(i - 1, i);
1959        }
1960        clearEntry(index);
1961        setDirty(true);
1962    }
1963
1964    /**
1965     * Shift entries starting at the byHowMuch'th element to the left, thus
1966     * removing the first byHowMuch elements of the entries array. This
1967     * always starts at the 0th entry. The caller is responsible for decrementing
1968     * nEntries.
1969     *
1970     * @param byHowMuch - The number of entries to remove from the left side
1971     * of the entries array.
1972     */

1973    private void shiftEntriesLeft(int byHowMuch) {
1974        for (int i = 0; i < nEntries - byHowMuch; i++) {
1975        setEntryInternal(i + byHowMuch, i);
1976        }
1977        for (int i = nEntries - byHowMuch; i < nEntries; i++) {
1978        clearEntry(i);
1979        }
1980        setDirty(true);
1981    }
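    /*
     * Illustrative sketch (not part of the original source): the array
     * semantics of shiftEntriesRight and shiftEntriesLeft on a plain Object[]
     * with nEntries live slots. Right-shift opens a hole at index (the caller
     * then fills it and increments the count); left-shift discards the first
     * byHowMuch slots (the caller then decrements the count).
     *
     *     static void shiftRight(Object[] slots, int nEntries, int index) {
     *         for (int i = nEntries; i > index; i--) {
     *             slots[i] = slots[i - 1];
     *         }
     *         slots[index] = null;              // hole for the new entry
     *     }
     *
     *     static void shiftLeft(Object[] slots, int nEntries, int byHowMuch) {
     *         for (int i = 0; i < nEntries - byHowMuch; i++) {
     *             slots[i] = slots[i + byHowMuch];
     *         }
     *         for (int i = nEntries - byHowMuch; i < nEntries; i++) {
     *             slots[i] = null;
     *         }
     *     }
     */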
1982
1983    /**
1984     * Check that the IN is in a valid state. For now, validity means that the
1985     * keys are in sorted order and that there are more than 0 entries.
1986     * maxKey, if non-null, specifies that all keys in this node must be less
1987     * than maxKey.
1988     */

1989    public void verify(byte[] maxKey)
1990        throws DatabaseException {
1991
1992    /********* dead code, but may be used as the basis of a verify()
1993           method in the future.
1994    try {
1995        Comparator userCompareToFcn =
1996        (databaseImpl == null ? null : getKeyComparator());
1997
1998        byte[] key1 = null;
1999        for (int i = 1; i < nEntries; i++) {
2000        key1 = entryKeyVals[i];
2001        byte[] key2 = entryKeyVals[i - 1];
2002
2003        int s = Key.compareKeys(key1, key2, userCompareToFcn);
2004        if (s <= 0) {
2005            throw new InconsistentNodeException
2006            ("IN " + getNodeId() + " key " + (i-1) +
2007             " (" + Key.dumpString(key2, 0) +
2008             ") and " +
2009             i + " (" + Key.dumpString(key1, 0) +
2010             ") are out of order");
2011        }
2012        }
2013
2014        boolean inconsistent = false;
2015        if (maxKey != null && key1 != null) {
2016                if (Key.compareKeys(key1, maxKey, userCompareToFcn) >= 0) {
2017                    inconsistent = true;
2018                }
2019        }
2020
2021        if (inconsistent) {
2022        throw new InconsistentNodeException
2023            ("IN " + getNodeId() +
2024             " has entry larger than next entry in parent.");
2025        }
2026    } catch (DatabaseException DE) {
2027        DE.printStackTrace(System.out);
2028    }
2029    *****************/

2030    }
2031
2032    /**
2033     * Add self and children to this in-memory IN list. Called by recovery, can
2034     * run with no latching.
2035     */

2036    void rebuildINList(INList inList)
2037        throws DatabaseException {
2038
2039        /*
2040         * Recompute your in memory size first and then add yourself to the
2041         * list.
2042         */

2043        initMemorySize();
2044        inList.add(this);
2045
2046        /*
2047         * Add your children if they're resident. (LNs know how to stop the
2048         * flow).
2049         */

2050        for (int i = 0; i < nEntries; i++) {
2051            Node n = getTarget(i);
2052            if (n != null) {
2053                n.rebuildINList(inList);
2054            }
2055        }
2056    }
2057
2058    /**
2059     * Remove self and children from the in-memory IN list. The INList latch is
2060     * already held before this is called. Also count removed nodes as
2061     * obsolete.
2062     */

2063    void accountForSubtreeRemoval(INList inList,
2064                                  UtilizationTracker tracker)
2065        throws DatabaseException {
2066
2067        if (nEntries > 1) {
2068            throw new DatabaseException
2069                ("Found non-deletable IN " + getNodeId() +
2070                 " while flushing INList. nEntries = " + nEntries);
2071        }
2072
2073        /* Remove self. */
2074        inList.removeLatchAlreadyHeld(this);
2075
2076        /* Count as obsolete. */
2077        if (lastFullVersion != DbLsn.NULL_LSN) {
2078            tracker.countObsoleteNode(lastFullVersion, getLogType(), 0);
2079        }
2080
2081        /*
2082         * Remove your children. They should already be resident. (LNs know
2083         * how to stop.)
2084         */

2085        for (int i = 0; i < nEntries; i++) {
2086            Node n = fetchTarget(i);
2087            if (n != null) {
2088                n.accountForSubtreeRemoval
2089                    (inList, tracker);
2090            }
2091        }
2092    }
2093
2094    /**
2095     * Check if this node fits the qualifications for being part of a deletable
2096     * subtree. It can only have one IN child and no LN children.
2097     *
2098     * We assume that this is only called under an assert.
2099     */

2100    boolean isValidForDelete()
2101        throws DatabaseException {
2102
2103    /*
2104     * Can only have one valid child, and that child should be
2105     * deletable.
2106     */

2107    if (nEntries > 1) { // more than 1 entry.
2108        return false;
2109    } else if (nEntries == 1) { // 1 entry, check child
2110        Node child = fetchTarget(0);
2111        if (child == null) {
2112        return false;
2113        }
2114        child.latchShared();
2115        boolean ret = child.isValidForDelete();
2116        child.releaseLatch();
2117        return ret;
2118    } else { // 0 entries.
2119        return true;
2120    }
2121    }
2122
2123    /**
2124     * See if you are the parent of this child. If not, find a child of yours
2125     * that may be the parent, and return it. If there are no possibilities,
2126     * return null. Note that the keys of the target are passed in so we don't
2127     * have to latch the target to look at them. Also, this node is latched
2128     * upon entry.
2129     *
2130     * @param doFetch If true, fetch the child in the pursuit of this search.
2131     * If false, give up if the child is not resident. In that case, we have
2132     * a potential ancestor, but are not sure if this is the parent.
2133     */

2134    void findParent(Tree.SearchType searchType,
2135                    long targetNodeId,
2136            boolean targetContainsDuplicates,
2137                    boolean targetIsRoot,
2138                    byte[] targetMainTreeKey,
2139                    byte[] targetDupTreeKey,
2140                    SearchResult result,
2141                    boolean requireExactMatch,
2142                    boolean updateGeneration,
2143                    int targetLevel,
2144                    List trackingList,
2145                    boolean doFetch)
2146        throws DatabaseException {
2147
2148        assert isLatchOwnerForWrite();
2149
2150        /* We are this node -- there's no parent in this subtree. */
2151        if (getNodeId() == targetNodeId) {
2152            releaseLatch();
2153            result.exactParentFound = false; // no parent exists
2154            result.keepSearching = false;
2155            result.parent = null;
2156            return;
2157        }
2158
2159        /* Find an entry */
2160        if (getNEntries() == 0) {
2161
2162            /*
2163             * No more children, can't descend anymore. Return this node, you
2164             * could be the parent.
2165             */

2166            result.keepSearching = false;
2167            result.exactParentFound = false;
2168            if (requireExactMatch) {
2169                releaseLatch();
2170                result.parent = null;
2171            } else {
2172                result.parent = this;
2173                result.index = -1;
2174            }
2175            return;
2176        } else {
2177            if (searchType == Tree.SearchType.NORMAL) {
2178                /* Look for the entry matching key in the current node. */
2179                result.index = findEntry(selectKey(targetMainTreeKey,
2180                                                   targetDupTreeKey),
2181                                         false, false);
2182            } else if (searchType == Tree.SearchType.LEFT) {
2183                /* Left search, always take the 0th entry. */
2184                result.index = 0;
2185            } else if (searchType == Tree.SearchType.RIGHT) {
2186                /* Right search, always take the highest entry. */
2187                result.index = nEntries - 1;
2188            } else {
2189                throw new IllegalArgumentException
2190                    ("Invalid value of searchType: " + searchType);
2191            }
2192
2193            if (result.index < 0) {
2194                result.keepSearching = false;
2195                result.exactParentFound = false;
2196                if (requireExactMatch) {
2197                    releaseLatch();
2198                    result.parent = null;
2199                } else {
2200                    /* This node is going to be the prospective parent. */
2201                    result.parent = this;
2202                }
2203                return;
2204            }
2205            
2206            /*
2207             * Get the child node that matches. If fetchTarget returns null, a
2208             * deleted LN was cleaned.
2209             */

2210            Node child = null;
2211            boolean isDeleted = false;
2212            if (isEntryKnownDeleted(result.index)) {
2213                isDeleted = true;
2214            } else if (doFetch) {
2215                child = fetchTarget(result.index);
2216                if (child == null) {
2217                    isDeleted = true;
2218                }
2219            } else {
2220                child = getTarget(result.index);
2221            }
2222
2223            /* The child is a deleted cleaned entry or is knownDeleted. */
2224            if (isDeleted) {
2225                result.exactParentFound = false;
2226                result.keepSearching = false;
2227                if (requireExactMatch) {
2228                    result.parent = null;
2229                    releaseLatch();
2230                } else {
2231                    result.parent = this;
2232                }
2233                return;
2234            }
2235
2236            /* Try matching by level. */
2237            if (targetLevel >= 0 && level == targetLevel + 1) {
2238                result.exactParentFound = true;
2239                result.parent = this;
2240                result.keepSearching = false;
2241                return;
2242            }
2243            
2244            if (child == null) {
2245                assert !doFetch;
2246
2247                /*
2248                 * This node will be the possible parent.
2249                 */

2250                result.keepSearching = false;
2251                result.exactParentFound = false;
2252                result.parent = this;
2253                result.childNotResident = true;
2254                return;
2255            }
2256
2257            long childLsn = getLsn(result.index);
2258
2259            /*
2260             * Note that if the child node needs latching, it's done in
2261             * isSoughtNode.
2262             */

2263            if (child.isSoughtNode(targetNodeId, updateGeneration)) {
2264                /* We found the child, so this is the parent. */
2265                result.exactParentFound = true;
2266                result.parent = this;
2267                result.keepSearching = false;
2268                return;
2269            } else {
2270
2271                /*
2272                 * Decide whether we can descend, or the search is going to be
2273                 * unsuccessful or whether this node is going to be the future
2274                 * parent. It depends on what this node is, the target, and the
2275                 * child.
2276                 */

2277                descendOnParentSearch(result,
2278                                      targetContainsDuplicates,
2279                                      targetIsRoot,
2280                                      targetNodeId,
2281                                      child,
2282                                      requireExactMatch);
2283
2284                /* If we're tracking, save the lsn and node id */
2285                if (trackingList != null) {
2286                    if ((result.parent != this) && (result.parent != null)) {
2287                        trackingList.add(new TrackingInfo(childLsn,
2288                                                          child.getNodeId()));
2289                    }
2290                }
2291                return;
2292            }
2293        }
2294    }
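    /*
     * Illustrative sketch (not part of the original source): how findParent
     * picks the child slot to descend through for each search type. NORMAL
     * searches by key, LEFT always takes the first slot, RIGHT the last.
     * keyIndex stands for the result of findEntry on the selected search key.
     *
     *     static int chooseIndex(Tree.SearchType type, int nEntries, int keyIndex) {
     *         if (type == Tree.SearchType.NORMAL) {
     *             return keyIndex;          // entry matching the search key
     *         } else if (type == Tree.SearchType.LEFT) {
     *             return 0;                 // leftmost child
     *         } else if (type == Tree.SearchType.RIGHT) {
     *             return nEntries - 1;      // rightmost child
     *         }
     *         throw new IllegalArgumentException("Invalid searchType: " + type);
     *     }
     */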
2295
2296    /*
2297     * If this search can go further, return the child. If it can't, and you
2298     * are a possible new parent to this child, return this IN. If the search
2299     * can't go further and this IN can't be a parent to this child, return
2300     * null.
2301     */

2302    protected void descendOnParentSearch(SearchResult result,
2303                                         boolean targetContainsDuplicates,
2304                                         boolean targetIsRoot,
2305                                         long targetNodeId,
2306                                         Node child,
2307                                         boolean requireExactMatch)
2308        throws DatabaseException {
2309
2310        if (child.canBeAncestor(targetContainsDuplicates)) {
2311            /* We can search further. */
2312            releaseLatch();
2313            result.parent = (IN) child;
2314        } else {
2315
2316            /*
2317             * Our search ends, we didn't find it. If we need an exact match,
2318             * give up, if we only need a potential match, keep this node
2319             * latched and return it.
2320             */

2321            ((IN) child).releaseLatch();
2322            result.exactParentFound = false;
2323            result.keepSearching = false;
2324
2325            if (requireExactMatch) {
2326                releaseLatch();
2327                result.parent = null;
2328            } else {
2329                result.parent = this;
2330            }
2331        }
2332    }
2333
2334    /*
2335     * @return true if this IN is the node being sought by the search. Note
2336     * that if this returns false, this node remains latched.
2337     */

2338    protected boolean isSoughtNode(long nid, boolean updateGeneration)
2339        throws DatabaseException {
2340
2341        latch(updateGeneration);
2342        if (getNodeId() == nid) {
2343            releaseLatch();
2344            return true;
2345        } else {
2346            return false;
2347        }
2348    }
2349
2350    /*
2351     * An IN can be an ancestor of any internal node.
2352     */

2353    protected boolean canBeAncestor(boolean targetContainsDuplicates) {
2354        return true;
2355    }
2356
2357    /**
2358     * Returns whether this node can be evicted. This is faster than
2359     * (getEvictionType() == MAY_EVICT_NODE) because it does a more static,
2360     * stringent check and is used by the evictor after a node has been
2361     * selected, to check that it is still evictable. The more complex
2362     * evaluation done by getEvictionType() is used when initially selecting
2363     * a node for inclusion in the eviction set.
2364     */

2365    public boolean isEvictable() {
2366
2367        if (isEvictionProhibited()) {
2368            return false;
2369        }
2370
2371        /*
2372         * An IN can be evicted if its resident children are all LNs, because
2373         * those children can be logged and stripped before this node is
2374         * evicted.
2375         */

2376        if (hasNonLNChildren()) {
2377            return false;
2378        }
2379
2380        return true;
2381    }
2382
2383    /**
2384     * Returns the eviction type for this IN, for use by the evictor. Uses the
2385     * internal isEvictionProhibited and getChildEvictionType methods that may
2386     * be overridden by subclasses.
2387     *
2388     * This differs from isEvictable() because it does more detailed evaluation
2389     * about the degree of evictability. It's used generally when selecting
2390     * candidates for eviction.
2391     *
2392     * @return MAY_EVICT_LNS if evictable LNs may be stripped; otherwise,
2393     * MAY_EVICT_NODE if the node itself may be evicted; otherwise,
2394     * MAY_NOT_EVICT.
2395     */

2396    public int getEvictionType() {
2397
2398        if (isEvictionProhibited()) {
2399            return MAY_NOT_EVICT;
2400        } else {
2401            return getChildEvictionType();
2402        }
2403    }
2404
2405    /**
2406     * Returns whether the node is not evictable, irrespective of the status
2407     * of the children nodes.
2408     */

2409    boolean isEvictionProhibited() {
2410
2411        return isDbRoot();
2412    }
2413
2414    /**
2415     * Returns whether any resident children are not LNs (are INs).
2416     * For an IN, that equates to whether there are any resident children
2417     * at all.
2418     */

2419    boolean hasNonLNChildren() {
2420
2421        return hasResidentChildren();
2422    }
2423
2424    /**
2425     * Returns the eviction type based on the status of child nodes,
2426     * irrespective of isEvictionProhibited.
2427     */

2428    int getChildEvictionType() {
2429
2430        return hasResidentChildren() ? MAY_NOT_EVICT : MAY_EVICT_NODE;
2431    }
2432
2433    /**
2434     * Returns whether any child is non-null.
2435     */

2436    private boolean hasResidentChildren() {
2437
2438        for (int i = 0; i < getNEntries(); i++) {
2439            if (getTarget(i) != null) {
2440                return true;
2441            }
2442        }
2443
2444        return false;
2445    }
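    /*
     * Illustrative sketch (not part of the original source): the two eviction
     * checks above, side by side, for a plain IN. getEvictionType() grades a
     * candidate during selection; isEvictable() is the stricter recheck after
     * selection. For an IN both reduce to "not the DB root and no resident
     * children".
     *
     *     boolean evictableNow  = !isDbRoot() && !hasResidentChildren();
     *     int     evictionGrade = isDbRoot() ? MAY_NOT_EVICT
     *                           : (hasResidentChildren() ? MAY_NOT_EVICT
     *                                                    : MAY_EVICT_NODE);
     */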
2446
2447    /*
2448     * DbStat support.
2449     */

2450    void accumulateStats(TreeWalkerStatsAccumulator acc) {
2451    acc.processIN(this, new Long(getNodeId()), getLevel());
2452    }
2453
2454    /*
2455     * Logging support
2456     */

2457
2458    /**
2459     * When splits and checkpoints intermingle in a deferred write database,
2460     * a checkpoint target may appear which has a valid target but a null LSN.
2461     * Deferred write dbs are written out in checkpoint style by either
2462     * Database.sync() or a checkpoint which has cleaned a file containing
2463     * deferred write entries. For example,
2464     * INa
2465     * |
2466     * BINb
2467     *
2468     * A checkpoint or Database.sync starts
2469     * The INList is traversed, dirty nodes are selected
2470     * BINb is bypassed on the INList, since it's not dirty
2471     * BINb is split, creating a new sibling, BINc, and dirtying INa
2472     * INa is selected as a dirty node for the ckpt
2473     *
2474     * If this happens, INa is in the selected dirty set, but not its dirty
2475     * child BINb and new child BINc.
2476     *
2477     * In a durable db, the existence of BINb and BINc are logged
2478     * anyway. But in a deferred write db, there is an entry that points to
2479     * BINc, but no logged version.
2480     *
2481     * This will not cause problems with eviction, because INa can't be
2482     * evicted until BINb and BINc are logged, are non-dirty, and are detached.
2483     * But it can cause problems at recovery, because INa will have a null LSN
2484     * for a valid entry, and the LN children of BINc will not find a home.
2485     * To prevent this, search for all dirty children that might have been
2486     * missed during the selection phase, and write them out. It's not
2487     * sufficient to write only null-LSN children, because the existing sibling
2488     * must be logged lest LN children recover twice (once in the new sibling,
2489     * once in the old existing sibling).
2490     */

2491    public void logDirtyChildren()
2492        throws DatabaseException {
2493
2494        EnvironmentImpl envImpl = getDatabase().getDbEnvironment();
2495
2496        /* Look for targets that are dirty. */
2497        for (int i = 0; i < getNEntries(); i++) {
2498
2499            IN child = (IN) getTarget(i);
2500            if (child != null) {
2501                child.latch(false);
2502                try {
2503                    if (child.getDirty()) {
2504                        /* Ask descendants to log their children. */
2505                        child.logDirtyChildren();
2506                        long childLsn =
2507                            child.log(envImpl.getLogManager(),
2508                                      false, // allow deltas
2509                                      true, // is provisional
2510                                      false, // proactive migration
2511                                      true, // backgroundIO
2512                                      this); // provisional parent
2513                        updateEntry(i, childLsn);
2514                    }
2515                } finally {
2516                    child.releaseLatch();
2517                }
2518            }
2519        }
2520    }
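    /*
     * Illustrative sketch (not part of the original source): the bottom-up
     * order used by logDirtyChildren, shown on a hypothetical FakeNode type.
     * Dirty descendants are written first, then the child itself, and only
     * then is the parent's slot updated with the child's new LSN, so a parent
     * never reaches the log pointing at an unlogged child.
     *
     *     static void flushDirtyChildren(FakeNode node) { // FakeNode is hypothetical
     *         for (int i = 0; i < node.childCount(); i++) {
     *             FakeNode child = node.child(i);
     *             if (child != null && child.isDirty()) {
     *                 flushDirtyChildren(child);           // descendants first
     *                 long childLsn = child.writeToLog();  // then the child itself
     *                 node.setChildLsn(i, childLsn);       // like updateEntry(i, childLsn)
     *             }
     *         }
     *     }
     */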
2521        
2522    /**
2523     * Log this IN and clear the dirty flag.
2524     */

2525    public long log(LogManager logManager)
2526        throws DatabaseException {
2527
2528        return logInternal(logManager,
2529                           false, // allowDeltas
2530                           false, // isProvisional
2531                           false, // proactiveMigration
2532                           false, // backgroundIO
2533                           null); // parent
2534    }
2535
2536    /**
2537     * Log this node with all available options.
2538     */

2539    public long log(LogManager logManager,
2540                    boolean allowDeltas,
2541                    boolean isProvisional,
2542                    boolean proactiveMigration,
2543                    boolean backgroundIO,
2544                    IN parent) // for provisional
2545        throws DatabaseException {
2546
2547        return logInternal(logManager,
2548                           allowDeltas,
2549                           isProvisional,
2550                           proactiveMigration,
2551                           backgroundIO,
2552                           parent);
2553    }
2554
2555    /**
2556     * Log this IN and clear the dirty flag.
2557     */

2558    public long optionalLog(LogManager logManager)
2559        throws DatabaseException {
2560
2561        if (databaseImpl.isDeferredWrite()) {
2562            return DbLsn.NULL_LSN;
2563        } else {
2564            return logInternal(logManager,
2565                               false, // allowDeltas
2566                               false, // isProvisional
2567                               false, // proactiveMigration
2568                               false, // backgroundIO
2569                               null); // parent
2570        }
2571    }
2572
2573
2574    /**
2575     * Log this node provisionally and clear the dirty flag.
2576     * @param parent the parent IN, which tracks provisionally obsolete LSNs
2577     * @return LSN of the new log entry, or NULL_LSN for a deferred-write database
2578     */

2579    public long optionalLogProvisional(LogManager logManager, IN parent)
2580        throws DatabaseException {
2581
2582        if (databaseImpl.isDeferredWrite()) {
2583            return DbLsn.NULL_LSN;
2584        } else {
2585            return logInternal(logManager,
2586                               false, // allowDeltas
2587                               true, // isProvisional
2588                               false, // proactiveMigration
2589                               false, // backgroundIO
2590                               parent);
2591        }
2592    }
2593
2594    /**
2595     * Decide how to log this node. INs are always logged in full. Migration
2596     * is never performed since it only applies to BINs.
2597     */

2598    protected long logInternal(LogManager logManager,
2599                   boolean allowDeltas,
2600                               boolean isProvisional,
2601                               boolean proactiveMigration,
2602                               boolean backgroundIO,
2603                               IN parent)
2604        throws DatabaseException {
2605
2606        /*
2607         * The last version of this node must be counted obsolete at the
2608         * correct time. If logging non-provisionally, the last version of this
2609         * node and any provisionally logged descendants are immediately
2610         * obsolete and can be flushed. If logging provisionally, the last
2611         * version isn't obsolete until an ancestor is logged
2612         * non-provisionally, so propagate obsolete lsns upwards.
2613         */

2614        long lsn = logManager.log
2615            (new INLogEntry(this), isProvisional, backgroundIO,
2616             isProvisional ? DbLsn.NULL_LSN : lastFullVersion, 0);
2617
2618        if (isProvisional) {
2619            if (parent != null) {
2620                parent.trackProvisionalObsolete
2621                    (this, lastFullVersion, DbLsn.NULL_LSN);
2622            }
2623        } else {
2624            flushProvisionalObsolete(logManager);
2625        }
2626
2627        setLastFullLsn(lsn);
2628        setDirty(false);
2629        return lsn;
2630    }
2631
2632    /**
2633     * Adds the given obsolete LSNs and any tracked obsolete LSNs for the given
2634     * child IN to this IN's tracking list. This method is called to track
2635     * obsolete LSNs when a child IN is logged provisionally. Such LSNs cannot
2636     * be considered obsolete until an ancestor IN is logged non-provisionally.
2637     */

2638    void trackProvisionalObsolete(IN child,
2639                                  long obsoleteLsn1,
2640                                  long obsoleteLsn2) {
2641
2642        int memDelta = 0;
2643
2644        if (child.provisionalObsolete != null) {
2645
2646            int childMemDelta = child.provisionalObsolete.size() *
2647                                MemoryBudget.LONG_LIST_PER_ITEM_OVERHEAD;
2648
2649            if (provisionalObsolete != null) {
2650                provisionalObsolete.addAll(child.provisionalObsolete);
2651            } else {
2652                provisionalObsolete = child.provisionalObsolete;
2653            }
2654            child.provisionalObsolete = null;
2655
2656            child.changeMemorySize(0 - childMemDelta);
2657            memDelta += childMemDelta;
2658        }
2659
2660        if (obsoleteLsn1 != DbLsn.NULL_LSN || obsoleteLsn2 != DbLsn.NULL_LSN) {
2661
2662            if (provisionalObsolete == null) {
2663                provisionalObsolete = new ArrayList();
2664            }
2665
2666            if (obsoleteLsn1 != DbLsn.NULL_LSN) {
2667                provisionalObsolete.add(new Long(obsoleteLsn1));
2668                memDelta += MemoryBudget.LONG_LIST_PER_ITEM_OVERHEAD;
2669            }
2670
2671            if (obsoleteLsn2 != DbLsn.NULL_LSN) {
2672                provisionalObsolete.add(new Long(obsoleteLsn2));
2673                memDelta += MemoryBudget.LONG_LIST_PER_ITEM_OVERHEAD;
2674            }
2675        }
2676
2677        if (memDelta != 0) {
2678            changeMemorySize(memDelta);
2679        }
2680    }
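    /*
     * Illustrative sketch (not part of the original source): the hand-off of a
     * provisionally logged child's obsolete-LSN list to its parent, with the
     * memory budget adjusted on both sides, shown with plain ArrayLists and a
     * per-item overhead constant (names here are hypothetical).
     *
     *     static void adopt(java.util.List<Long> parentList,
     *                       java.util.List<Long> childList,
     *                       long[] parentMem, long[] childMem, long perItem) {
     *         long moved = childList.size() * perItem;
     *         parentList.addAll(childList);   // parent now owns the LSNs
     *         childList.clear();
     *         childMem[0] -= moved;           // child gives back its budget
     *         parentMem[0] += moved;          // parent is charged the same amount
     *     }
     */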
2681
2682    /**
2683     * Adds the provisional obsolete tracking information in this node to the
2684     * live tracker. This method is called when this node is logged
2685     * non-provisionally.
2686     */

2687    void flushProvisionalObsolete(LogManager logManager)
2688        throws DatabaseException {
2689
2690        if (provisionalObsolete != null) {
2691
2692            int memDelta = provisionalObsolete.size() *
2693                           MemoryBudget.LONG_LIST_PER_ITEM_OVERHEAD;
2694
2695            logManager.countObsoleteINs(provisionalObsolete);
2696            provisionalObsolete = null;
2697
2698            changeMemorySize(0 - memDelta);
2699        }
2700    }
2701
2702    /**
2703     * @see LoggableObject#getLogType
2704     */

2705    public LogEntryType getLogType() {
2706        return LogEntryType.LOG_IN;
2707    }
2708
2709    /**
2710     * @see LoggableObject#getLogSize
2711     */

2712    public int getLogSize() {
2713        int size = super.getLogSize(); // ancestors
2714        size += LogUtils.getByteArrayLogSize(identifierKey); // identifier key
2715        size += LogUtils.getBooleanLogSize(); // isRoot
2716        size += LogUtils.INT_BYTES; // nEntries
2717        size += LogUtils.INT_BYTES; // level
2718        size += LogUtils.INT_BYTES; // length of entries array
2719        size += LogUtils.getBooleanLogSize(); // compactLsnsRep
2720        boolean compactLsnsRep = (entryLsnLongArray == null);
2721        if (compactLsnsRep) {
2722            size += LogUtils.INT_BYTES; // baseFileNumber
2723        }
2724
2725        for (int i = 0; i < nEntries; i++) { // entries
2726            size += LogUtils.getByteArrayLogSize(entryKeyVals[i]) + // key
2727                    (compactLsnsRep ? LogUtils.INT_BYTES :
2728                     LogUtils.getLongLogSize()) + // LSN
2729                    1; // state
2730        }
2731        return size;
2732    }
2733
2734    /**
2735     * @see LoggableObject#writeToLog
2736     */

2737    public void writeToLog(ByteBuffer logBuffer) {
2738
2739        // ancestors
2740        super.writeToLog(logBuffer);
2741
2742        // identifier key
2743        LogUtils.writeByteArray(logBuffer, identifierKey);
2744
2745        // isRoot
2746        LogUtils.writeBoolean(logBuffer, isRoot);
2747
2748        // nEntries
2749        LogUtils.writeInt(logBuffer, nEntries);
2750
2751        // level
2752        LogUtils.writeInt(logBuffer, level);
2753
2754        // length of entries array
2755        LogUtils.writeInt(logBuffer, entryTargets.length);
2756
2757        // true if compact representation
2758        boolean compactLsnsRep = (entryLsnLongArray == null);
2759        LogUtils.writeBoolean(logBuffer, compactLsnsRep);
2760        if (compactLsnsRep) {
2761            LogUtils.writeInt(logBuffer, (int) baseFileNumber);
2762        }
2763
2764        // entries
2765        for (int i = 0; i < nEntries; i++) {
2766            LogUtils.writeByteArray(logBuffer, entryKeyVals[i]); // key
2767
2768            /*
2769             * A NULL_LSN may be stored when an incomplete insertion occurs,
2770             * but in that case the KnownDeleted flag must be set. See
2771             * Tree.insert. [#13126]
2772             */

2773            assert checkForNullLSN(i) :
2774                "logging IN " + getNodeId() + " with null lsn child " +
2775                " db=" + databaseImpl.getDebugName() +
2776                " isDeferredWrite=" + databaseImpl.isDeferredWrite();
2777
2778        if (compactLsnsRep) { // LSN
2779        int offset = i << 2;
2780        int fileOffset = getFileOffset(offset);
2781        logBuffer.put(getFileNumberOffset(offset));
2782        logBuffer.put((byte) ((fileOffset >>> 0) & 0xff));
2783        logBuffer.put((byte) ((fileOffset >>> 8) & 0xff));
2784        logBuffer.put((byte) ((fileOffset >>> 16) & 0xff));
2785        } else {
2786        LogUtils.writeLong(logBuffer, entryLsnLongArray[i]);
2787        }
2788        logBuffer.put(entryStates[i]); // state
2789        entryStates[i] &= CLEAR_DIRTY_BIT;
2790        }
2791    }
2792
2793    /*
2794     * Used for assertion to prevent writing a null lsn to the log.
2795     */

2796    private boolean checkForNullLSN(int index) {
2797        boolean ok;
2798        if (this instanceof BIN) {
2799            ok = !(getLsn(index) == DbLsn.NULL_LSN &&
2800                   (entryStates[index] & KNOWN_DELETED_BIT) == 0);
2801        } else {
2802            ok = (getLsn(index) != DbLsn.NULL_LSN);
2803        }
2804        return ok;
2805    }
2806
2807    /**
2808     * @see LogReadable#readFromLog
2809     */

2810    public void readFromLog(ByteBuffer itemBuffer, byte entryTypeVersion)
2811        throws LogException {
2812
2813        // ancestors
2814        super.readFromLog(itemBuffer, entryTypeVersion);
2815
2816        // identifier key
2817        identifierKey = LogUtils.readByteArray(itemBuffer);
2818
2819        // isRoot
2820        isRoot = LogUtils.readBoolean(itemBuffer);
2821
2822        // nEntries
2823        nEntries = LogUtils.readInt(itemBuffer);
2824
2825        // level
2826        level = LogUtils.readInt(itemBuffer);
2827
2828        // length of entries array
2829        int length = LogUtils.readInt(itemBuffer);
2830
2831    entryTargets = new Node[length];
2832    entryKeyVals = new byte[length][];
2833    baseFileNumber = -1;
2834    long storedBaseFileNumber = -1;
2835    entryLsnByteArray = new byte[length << 2];
2836    entryLsnLongArray = null;
2837    entryStates = new byte[length];
2838    boolean compactLsnsRep = false;
2839    if (entryTypeVersion > 1) {
2840        compactLsnsRep = LogUtils.readBoolean(itemBuffer);
2841        if (compactLsnsRep) {
2842        baseFileNumber = LogUtils.readInt(itemBuffer) & 0xffffffff;
2843                storedBaseFileNumber = baseFileNumber;
2844        }
2845    }
2846    for (int i = 0; i < nEntries; i++) {
2847        entryKeyVals[i] = LogUtils.readByteArray(itemBuffer); // key
2848        long lsn;
2849        if (compactLsnsRep) {
2850        /* LSNs in compact form. */
2851        byte fileNumberOffset = itemBuffer.get();
2852        int fileOffset = (itemBuffer.get() & 0xff);
2853        fileOffset |= ((itemBuffer.get() & 0xff) << 8);
2854        fileOffset |= ((itemBuffer.get() & 0xff) << 16);
2855                if (fileOffset == THREE_BYTE_NEGATIVE_ONE) {
2856                    lsn = DbLsn.NULL_LSN;
2857                } else {
2858                    lsn = DbLsn.makeLsn
2859                        (storedBaseFileNumber + fileNumberOffset, fileOffset);
2860                }
2861        } else {
2862        /* LSNs in long form. */
2863        lsn = LogUtils.readLong(itemBuffer); // LSN
2864        }
2865            setLsnElement(i, lsn);
2866
2867        byte entryState = itemBuffer.get(); // state
2868        entryState &= CLEAR_DIRTY_BIT;
2869        entryState &= CLEAR_MIGRATE_BIT;
2870
2871            /*
2872             * A NULL_LSN is the remnant of an incomplete insertion and the
2873             * KnownDeleted flag should be set. But because of bugs in prior
2874             * releases, the KnownDeleted flag may not be set. So set it here.
2875             * See Tree.insert. [#13126]
2876             */

2877            if (lsn == DbLsn.NULL_LSN) {
2878                entryState |= KNOWN_DELETED_BIT;
2879            }
2880
2881            entryStates[i] = entryState;
2882    }
2883
2884        latch.setName(shortClassName() + getNodeId());
2885    }
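    /*
     * Illustrative sketch (not part of the original source): the compact LSN
     * encoding used by writeToLog/readFromLog above. Each entry stores one
     * signed byte (the file number relative to baseFileNumber) followed by a
     * three-byte little-endian file offset; an offset of 0xffffff stands for
     * NULL_LSN.
     *
     *     static void writeCompact(java.nio.ByteBuffer buf,
     *                              byte fileNumberOffset, int fileOffset) {
     *         buf.put(fileNumberOffset);
     *         buf.put((byte) (fileOffset & 0xff));
     *         buf.put((byte) ((fileOffset >>> 8) & 0xff));
     *         buf.put((byte) ((fileOffset >>> 16) & 0xff));
     *     }
     *
     *     static long readCompact(java.nio.ByteBuffer buf, long baseFileNumber) {
     *         byte fileNumberOffset = buf.get();
     *         int fileOffset = (buf.get() & 0xff)
     *                        | ((buf.get() & 0xff) << 8)
     *                        | ((buf.get() & 0xff) << 16);
     *         if (fileOffset == THREE_BYTE_NEGATIVE_ONE) {
     *             return DbLsn.NULL_LSN;
     *         }
     *         return DbLsn.makeLsn(baseFileNumber + fileNumberOffset, fileOffset);
     *     }
     */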
2886    
2887    /**
2888     * @see LogReadable#dumpLog
2889     */

2890    public void dumpLog(StringBuffer sb, boolean verbose) {
2891        sb.append(beginTag());
2892
2893        super.dumpLog(sb, verbose);
2894        sb.append(Key.dumpString(identifierKey, 0));
2895
2896        // isRoot
2897        sb.append("<isRoot val=\"");
2898        sb.append(isRoot);
2899        sb.append("\"/>");
2900
2901        // level
2902        sb.append("<level val=\"");
2903        sb.append(Integer.toHexString(level));
2904        sb.append("\"/>");
2905
2906        // nEntries, length of entries array
2907        sb.append("<entries numEntries=\"");
2908        sb.append(nEntries);
2909        sb.append("\" length=\"");
2910        sb.append(entryTargets.length);
2911    boolean compactLsnsRep = (entryLsnLongArray == null);
2912        if (compactLsnsRep) {
2913            sb.append("\" baseFileNumber=\"");
2914            sb.append(baseFileNumber);
2915        }
2916        sb.append("\">");
2917
2918        if (verbose) {
2919            for (int i = 0; i < nEntries; i++) {
2920        sb.append("<ref knownDeleted=\"").
2921            append(isEntryKnownDeleted(i));
2922                sb.append("\" pendingDeleted=\"").
2923            append(isEntryPendingDeleted(i));
2924        sb.append("\">");
2925                sb.append(Key.dumpString(entryKeyVals[i], 0));
2926        sb.append(DbLsn.toString(getLsn(i)));
2927        sb.append("</ref>");
2928            }
2929        }
2930
2931        sb.append("</entries>");
2932
2933        /* Add on any additional items from subclasses before the end tag. */
2934        dumpLogAdditional(sb);
2935
2936        sb.append(endTag());
2937    }
2938
2939    /**
2940     * @see LogReadable#logEntryIsTransactional.
2941     */

2942    public boolean logEntryIsTransactional() {
2943    return false;
2944    }
2945
2946    /**
2947     * @see LogReadable#getTransactionId
2948     */

2949    public long getTransactionId() {
2950    return 0;
2951    }
2952
2953    /**
2954     * Allows subclasses to add additional fields before the end tag. If they
2955     * just override dumpLog, the XML isn't nested.
2956     */

2957    protected void dumpLogAdditional(StringBuffer sb) {
2958    }
2959
2960    public String beginTag() {
2961        return BEGIN_TAG;
2962    }
2963
2964    public String endTag() {
2965        return END_TAG;
2966    }
2967
2968    void dumpKeys() throws DatabaseException {
2969        for (int i = 0; i < nEntries; i++) {
2970            System.out.println(Key.dumpString(entryKeyVals[i], 0));
2971        }
2972    }
2973
2974    /**
2975     * For unit test support:
2976     * @return a string that dumps information about this IN, without
2977     */

2978    public String dumpString(int nSpaces, boolean dumpTags) {
2979        StringBuffer sb = new StringBuffer();
2980        if (dumpTags) {
2981            sb.append(TreeUtils.indent(nSpaces));
2982            sb.append(beginTag());
2983            sb.append('\n');
2984        }
2985
2986        sb.append(super.dumpString(nSpaces+2, true));
2987        sb.append('\n');
2988
2989        sb.append(TreeUtils.indent(nSpaces+2));
2990        sb.append("<idkey>");
2991        sb.append(identifierKey == null ? "" :
2992                  Key.dumpString(identifierKey, 0));
2993        sb.append("</idkey>");
2994        sb.append('\n');
2995        sb.append(TreeUtils.indent(nSpaces+2));
2996        sb.append("<dirty val=\"").append(dirty).append("\"/>");
2997        sb.append('\n');
2998        sb.append(TreeUtils.indent(nSpaces+2));
2999        sb.append("<generation val=\"").append(generation).append("\"/>");
3000        sb.append('\n');
3001        sb.append(TreeUtils.indent(nSpaces+2));
3002        sb.append("<level val=\"");
3003        sb.append(Integer.toHexString(level)).append("\"/>");
3004        sb.append('\n');
3005        sb.append(TreeUtils.indent(nSpaces+2));
3006        sb.append("<isRoot val=\"").append(isRoot).append("\"/>");
3007        sb.append('\n');
3008
3009        sb.append(TreeUtils.indent(nSpaces+2));
3010        sb.append("<entries nEntries=\"");
3011        sb.append(nEntries);
3012        sb.append("\">");
3013        sb.append('\n');
3014
3015        for (int i = 0; i < nEntries; i++) {
3016            sb.append(TreeUtils.indent(nSpaces+4));
3017            sb.append("<entry id=\"" + i + "\">");
3018            sb.append('\n');
3019            if (getLsn(i) == DbLsn.NULL_LSN) {
3020                sb.append(TreeUtils.indent(nSpaces + 6));
3021                sb.append("<lsn/>");
3022            } else {
3023                sb.append(DbLsn.dumpString(getLsn(i), nSpaces + 6));
3024            }
3025            sb.append('\n');
3026            if (entryKeyVals[i] == null) {
3027                sb.append(TreeUtils.indent(nSpaces + 6));
3028                sb.append("<key/>");
3029            } else {
3030                sb.append(Key.dumpString(entryKeyVals[i], (nSpaces + 6)));
3031            }
3032            sb.append('\n');
3033            if (entryTargets[i] == null) {
3034                sb.append(TreeUtils.indent(nSpaces + 6));
3035                sb.append("<target/>");
3036            } else {
3037                sb.append(entryTargets[i].dumpString(nSpaces + 6, true));
3038            }
3039            sb.append('\n');
3040            sb.append(TreeUtils.indent(nSpaces + 6));
3041            dumpDeletedState(sb, getState(i));
3042            sb.append("<dirty val=\"").append(isDirty(i)).append("\"/>");
3043            sb.append('\n');
3044            sb.append(TreeUtils.indent(nSpaces+4));
3045            sb.append("</entry>");
3046            sb.append('\n');
3047        }
3048
3049        sb.append(TreeUtils.indent(nSpaces+2));
3050        sb.append("</entries>");
3051        sb.append('\n');
3052        if (dumpTags) {
3053            sb.append(TreeUtils.indent(nSpaces));
3054            sb.append(endTag());
3055        }
3056        return sb.toString();
3057    }
3058
3059    /**
3060     * Utility method for output of knownDeleted and pendingDelete.
3061     */

3062    static void dumpDeletedState(StringBuffer JavaDoc sb, byte state) {
3063        sb.append("<knownDeleted val=\"");
3064        sb.append(isStateKnownDeleted(state)).append("\"/>");
3065        sb.append("<pendingDeleted val=\"");
3066        sb.append(isStatePendingDeleted(state)).append("\"/>");
3067    }
3068
3069    public String toString() {
3070        return dumpString(0, true);
3071    }
3072
3073    public String shortClassName() {
3074        return "IN";
3075    }
3076
3077    /**
3078     * Send trace messages to the java.util.logger. Don't rely on the logger
3079     * alone to conditionalize whether we send this message; we don't even want
3080     * to construct the message if the level is not enabled.
3081     */

3082    private void traceSplit(Level level,
3083                            IN parent,
3084                            IN newSibling,
3085                            long parentLsn,
3086                            long myNewLsn,
3087                            long newSiblingLsn,
3088                            int splitIndex,
3089                            int idKeyIndex,
3090                            int childIndex) {
3091        Logger logger = databaseImpl.getDbEnvironment().getLogger();
3092        if (logger.isLoggable(level)) {
3093            StringBuffer sb = new StringBuffer();
3094            sb.append(TRACE_SPLIT);
3095            sb.append(" parent=");
3096            sb.append(parent.getNodeId());
3097            sb.append(" child=");
3098            sb.append(getNodeId());
3099            sb.append(" newSibling=");
3100            sb.append(newSibling.getNodeId());
3101            sb.append(" parentLsn = ");
3102            sb.append(DbLsn.getNoFormatString(parentLsn));
3103            sb.append(" childLsn = ");
3104            sb.append(DbLsn.getNoFormatString(myNewLsn));
3105            sb.append(" newSiblingLsn = ");
3106            sb.append(DbLsn.getNoFormatString(newSiblingLsn));
3107            sb.append(" splitIdx=");
3108            sb.append(splitIndex);
3109            sb.append(" idKeyIdx=");
3110            sb.append(idKeyIndex);
3111            sb.append(" childIdx=");
3112            sb.append(childIndex);
3113            logger.log(level, sb.toString());
3114        }
3115    }
3116
3117    /**
3118     * Send trace messages to the java.util.logger. Don't rely on the logger
3119     * alone to conditionalize whether we send this message; we don't even want
3120     * to construct the message if the level is not enabled.
3121     */

3122    private void traceDelete(Level level, int index) {
3123        Logger logger = databaseImpl.getDbEnvironment().getLogger();
3124        if (logger.isLoggable(level)) {
3125            StringBuffer sb = new StringBuffer();
3126            sb.append(TRACE_DELETE);
3127            sb.append(" in=").append(getNodeId());
3128            sb.append(" index=");
3129            sb.append(index);
3130            logger.log(level, sb.toString());
3131        }
3132    }
3133}
3134