package org.ozoneDB.core.storage.gammaStore;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.ozoneDB.OzoneInternalException;
import org.ozoneDB.core.ConfigurationException;
import org.ozoneDB.core.ServerComponent;
import org.ozoneDB.core.storage.Cache;
import org.ozoneDB.core.storage.PropertyConfigurable;
import org.ozoneDB.core.storage.PropertyConfigurableFactory;
import org.ozoneDB.core.storage.PropertyInfo;
import org.ozoneDB.core.storage.TrimmingCache;
import org.ozoneDB.core.storage.WeakReferenceCache;

/**
 * Manages the on-disk index of the gamma store: a tree of {@link IndexBranchNode}s
 * whose leaves ({@link IndexLeafNode}) map object ids to {@link ContainerLocation}s.
 *
 * <p>Nodes move through three cache tiers:
 * <ol>
 *   <li>the <em>dirty</em> cache — modified nodes waiting to be written; when trimmed,
 *       a node is handed to the background {@link Serializer} and demoted to tier 2,</li>
 *   <li>the <em>general</em> cache — nodes (clean or already queued for writing); when
 *       trimmed, a node is demoted to tier 3,</li>
 *   <li>the <em>backup</em> cache — a {@link WeakReferenceCache} that keeps nodes
 *       reachable only as long as the garbage collector allows.</li>
 * </ol>
 *
 * <p>Nodes are pinned while in use via {@code startInvoke()}/{@code endInvoke()};
 * every acquisition through {@link #getRootNode()}, {@link #getNewestLeafNode()} or
 * {@link #getNode(long)} must be balanced by an {@code endInvoke()} on the caller's side.
 *
 * <p>Public lookup/update methods are {@code synchronized}; this instance is also
 * installed as the synchronizer of both trimming caches.
 */
public final class IndexManager implements PropertyConfigurable {

    private static Logger log = Logger.getLogger(IndexManager.class.getName());

    /** Lower bound accepted for {@link #MAXBRANCHNODESIZE} (see {@link #setMaxBranchNodeSize(int)}). */
    public static final int MINMAXBRANCHNODESIZE = 3;

    /** Lower bound accepted for {@link #MAXLEAFNODESIZE} (see {@link #setMaxLeafNodeSize(int)}). */
    public static final int MINMAXLEAFNODESIZE = 3;

    public static final PropertyInfo INDEXSTREAMFACTORY = new PropertyInfo(
            ".indexStreamFactory",
            "String (classname)",
            null,
            "factory to use to insert extra streams while (de)serializing index nodes",
            new String[] {
                "org.ozoneDB.core.gammaStore.ZipStreamFactory",
                "org.ozoneDB.core.gammaStore.GZIPStreamFactory",
            }
    );

    public static final PropertyInfo DIRTYINDEXNODECACHE = new PropertyInfo(
            ".dirtyIndexNodeCache",
            "String (classname, must implement org.ozoneDB.core.storage.TrimmingCache)",
            null,
            "cache for caching index nodes that have to be written to disk",
            new String[] {"org.ozoneDB.core.gammaStore.FixedSizeDelayCache"}
    );

    public static final PropertyInfo GENERALINDEXNODECACHE = new PropertyInfo(
            ".generalIndexNodeCache",
            "String (classname, must implement org.ozoneDB.core.storage.TrimmingCache)",
            null,
            // NOTE(review): "unchanched" is a typo for "unchanged" in this runtime string;
            // left untouched here because it may be matched elsewhere.
            "cache for caching index nodes, both changed and unchanched",
            new String[] {"org.ozoneDB.core.gammaStore.FixedSizeCache"}
    );

    public static final PropertyInfo MAXBRANCHNODESIZE = new PropertyInfo(
            ".maxBranchNodeSize",
            "int",
            "474",
            "Maximum number of index nodes in a branch node. Raw size of a branch node " +
            "(serialized, without compression) is x + 537 + n * 16 with x = 0 for " +
            "n <= 32, x = 4 for 32 < n <= 64, x = 8 for 64 < n <= 128, etc., " +
            "where n is the maximum number of index nodes.",
            new String[] {
                "93 (files just under 2K)",
                "226 (files just under 4K)",
                "474 (files just under 8K)",
                "982 (files just under 16K)",
                "1998 (files just under 32K)",
            }
    );

    public static final PropertyInfo BRANCHNODEMERGEREACH = new PropertyInfo(
            ".branchNodeMergeReach",
            "int",
            "2",
            "When a branch node becomes too small and wants to merge with other " +
            "branches, this value determines how 'far' a node should look for other " +
            "nodes to merge with. Say we have branch nodes 1 to 10 and 5 wants to " +
            "merge and this value is 2, then it will try to merge with 3, 4, 6 and 7.",
            new String[] { "1", "2" }
    );

    public static final PropertyInfo BRANCHNODEMERGESIZE = new PropertyInfo(
            ".branchNodeMergeSize",
            "int",
            "300",
            "When a child node is removed from a branch and the size of this branch " +
            "is below this value, then the branch tries to merge with other branches." +
            "Must be smaller than " + MAXBRANCHNODESIZE.getKey() + ". See also " +
            BRANCHNODEMERGEREACH.getKey() + ".",
            new String[] { "1", "100", "250" }
    );

    public static final PropertyInfo MAXLEAFNODESIZE = new PropertyInfo(
            ".maxLeafNodeSize",
            "int",
            "100",
            "Maximum number of locations in a leaf node. Raw size of a leaf node " +
            "(serialized, without compression) is x + 598 + n * 20 with x = 0 for " +
            "n <= 32, x = 4 for 32 < n <= 64, x = 8 for 64 < n <= 128, etc., " +
            "where n is the maximum number of locations.",
            new String[] {
                "72 (files just under 2K)",
                "179 (files just under 4K)",
                "377 (files just under 8K)",
                "784 (files just under 16K)",
                "1598 (files just under 32K)",
            }
    );

    public static final PropertyInfo LEAFNODEMERGEREACH = new PropertyInfo(
            ".leafNodeMergeReach",
            "int",
            "2",
            "When a leaf node becomes too small and wants to merge with other " +
            "leaves, this value determines how 'far' a node should look for other " +
            "nodes to merge with. Say we have leaf nodes 1 to 10 and 5 wants to " +
            "merge and this value is 2, then it will try to merge with 3, 4, 6 and 7.",
            new String[] { "1", "2" }
    );

    public static final PropertyInfo LEAFNODEMERGESIZE = new PropertyInfo(
            ".leafNodeMergeSize",
            "int",
            "250",
            "When a container location is removed from a leaf and the size of this leaf " +
            "is below this value, then the leaf tries to merge with other leaves." +
            "Must be smaller than " + MAXLEAFNODESIZE.getKey() + ". See also " +
            LEAFNODEMERGEREACH.getKey() + ".",
            new String[] { "10", "100" }
    );

    public static final PropertyInfo INDEXNODESTORAGEFACTORY = new PropertyInfo(
            ".indexNodeStorageFactory",
            "String (classname)",
            null,
            "factory to use to create Storage instances for reading/writing index nodes",
            // NOTE(review): example says "org.ozoneDB.code.gammaStore..." — "code" looks like a
            // typo for "core"; confirm against the actual package before fixing (runtime string).
            new String[] {"org.ozoneDB.code.gammaStore.FileStreamStorageFactory"}
    );

    public static final PropertyInfo INDEXNODESTREAMFACTORY = new PropertyInfo(
            ".indexNodeStreamFactory",
            "String (classname)",
            "",
            "factory to use to create java.io.[In|Out]putStream instances that are " +
            "\"plugged in\" during the (de)serializing of index nodes",
            // NOTE(review): same probable "code"/"core" typo as above.
            new String[] {"org.ozoneDB.code.gammaStore.ZipStreamStorageFactory"}
    );

    /** Name of the file (inside the db directory) holding size, id counter and root/newest-leaf ids. */
    private static final String CONFIGNAME = "indexmanager";

    /** The leaf holding the highest object ids; new locations usually land here (see putContainerLocation). */
    private IndexLeafNode newestLeafNode;

    /** Root of the index tree; kept out of the caches while it is the root (see setRootNode). */
    private IndexBranchNode rootNode;

    /** Monotonic source of node ids; pre-incremented in nextNodeId(), so first id handed out is 0. */
    private long nodeIdCounter = -1;

    /** Creates Storage instances for reading/writing node files. */
    private StorageFactory storageFactory;

    /** Optional stream wrapper (e.g. compression); null when the property is empty. */
    private StreamFactory streamFactory;

    /** Tier-1 cache: dirty nodes awaiting serialization. */
    private TrimmingCache dirtyNodeCache;

    /** Tier-2 cache: general node cache (clean nodes and nodes already queued to the serializer). */
    private TrimmingCache generalNodeCache;

    /** Tier-3 cache: weak references, last chance before a node must be re-read from disk. */
    private WeakReferenceCache backupNodeCache;

    private int maxLeafNodeSize;

    // NOTE(review): leafNodeMergeSize/leafNodeMergeReach/branchNodeMergeSize/branchNodeMergeReach
    // are never assigned from their PropertyInfos in the constructor (the setters exist but are
    // unused), so they keep the default value 0 — confirm whether that is intended.
    private int leafNodeMergeSize;

    private int leafNodeMergeReach;

    private int maxBranchNodeSize;

    private int branchNodeMergeSize;

    private int branchNodeMergeReach;

    /** Background writer; nodes put here are persisted asynchronously. */
    private Serializer nodeSerializer;

    /** Background deleter for node storage files. */
    private Deleter deleter;

    // Statistics (transient: not part of persistent state), reported in shutdown().
    private transient long nodeLoaded;

    private transient long nodeLoadedDirect;

    private transient long nodeLoadedSerializer;

    private transient long nodeLoadedCache;

    private transient long nodeLoadedDisk;

    /** Database directory read from GammaStore.DIRECTORY; location of the CONFIGNAME file. */
    private String dbDirectory;

    /** Number of container locations currently indexed. */
    private long size;

    /** Property-key prefix for this component (see getPrefix()). */
    private String prefix;

    /**
     * Creates the index manager.
     *
     * @param properties configuration source for factories, caches and node sizes
     * @param prefix     property-key prefix prepended to the PropertyInfo keys
     * @param initialize {@code true} to wipe the index directory and start with a fresh
     *                   root/leaf pair; {@code false} to restore state from the CONFIGNAME file
     * @throws IOException if restoring persistent state fails
     * @throws ConfigurationException if a size property is not a valid int or below its minimum
     */
    public IndexManager(Properties properties, String prefix, boolean initialize) throws IOException {

        this.prefix = prefix;
        backupNodeCache = new WeakReferenceCache();

        setStorageFactory((StorageFactory) PropertyConfigurableFactory.create(StorageFactory.class, properties, getPrefix() + INDEXNODESTORAGEFACTORY.getKey()));
        String streamFactoryClassname = properties.getProperty(getPrefix() + INDEXNODESTREAMFACTORY.getKey(), INDEXNODESTREAMFACTORY.getDefaultValue());
        // Empty default means "no extra stream layer": streamFactory stays null.
        if (streamFactoryClassname.length() > 0) {
            setStreamFactory((StreamFactory) PropertyConfigurableFactory.create(StreamFactory.class, properties, getPrefix() + INDEXNODESTREAMFACTORY.getKey()));
        }
        setNodeSerializer(new Serializer(getStorageFactory(), getStreamFactory(), "NodeSerializer"));

        deleter = new Deleter("node storage deleter");

        setDirtyNodeCache((TrimmingCache) PropertyConfigurableFactory.create(Cache.class, properties, getPrefix() + DIRTYINDEXNODECACHE.getKey()));
        getDirtyNodeCache().setSynchronizer(this);
        // Trimmed dirty nodes are demoted to the general cache and handed to the serializer.
        getDirtyNodeCache().setTrimHandler(new TrimmingCache.TrimHandler() {
            public void trimming(Object key, Object value) {
                IndexNode indexNode = (IndexNode) value;
                if (log.isLoggable(Level.FINE)) log.fine("indexnode " + indexNode.getNodeId() + " is trimmed from dirty cache, going to serializer");
                getGeneralNodeCache().put(new Long(indexNode.getNodeId()), indexNode);
                serialize(indexNode);
                if (log.isLoggable(Level.FINER)) log.finer("put in serializer: " + indexNode.getNodeId());
            }
        });

        setGeneralNodeCache((TrimmingCache) PropertyConfigurableFactory.create(Cache.class, properties, getPrefix() + GENERALINDEXNODECACHE.getKey()));
        getGeneralNodeCache().setSynchronizer(this);
        // Trimmed general-cache nodes fall back to the weak backup cache. A dirty node at this
        // point must be either still in the dirty cache or already queued in the serializer;
        // anything else indicates lost data and is logged as severe.
        getGeneralNodeCache().setTrimHandler(new TrimmingCache.TrimHandler() {
            public void trimming(Object key, Object value) {
                IndexNode indexNode = (IndexNode) value;
                if (indexNode.isDirty() && getDirtyNodeCache().get(new Long(indexNode.getNodeId())) == null) {
                    // NOTE(review): remove() here pulls the node OUT of the serializer queue just
                    // to check its presence — verify that this cannot drop a pending write.
                    IndexNode serializing = (IndexNode) getNodeSerializer().remove(new Long(indexNode.getNodeId()));
                    if (serializing == null) {
                        log.severe("WTF? not in dirty and not serializing? " + indexNode.getNodeId());
                    }
                }
                getBackupNodeCache().put(new Long(indexNode.getNodeId()), indexNode);
                if (log.isLoggable(Level.FINE)) log.fine("indexnode " + indexNode.getNodeId() + " is trimmed from general cache, going to backup cache");
            }
        });

        try {
            // NOTE(review): unlike every other property lookup above, these two omit
            // getPrefix() — likely a bug; confirm the intended key names before changing.
            String num = properties.getProperty(MAXBRANCHNODESIZE.getKey(), MAXBRANCHNODESIZE.getDefaultValue());
            setMaxBranchNodeSize(Integer.parseInt(num));
            num = properties.getProperty(MAXLEAFNODESIZE.getKey(), MAXLEAFNODESIZE.getDefaultValue());
            setMaxLeafNodeSize(Integer.parseInt(num));
        } catch (NumberFormatException e) {
            throw new ConfigurationException(e);
        }
        dbDirectory = properties.getProperty(GammaStore.DIRECTORY.getKey());
        if (initialize) {
            log.info("deleting all files in index directory");
            getStorageFactory().deleteAll();

            // Fresh index: one root branch containing one (empty) leaf.
            IndexBranchNode branchNode = new IndexBranchNode(this);
            setRootNode(branchNode);
            IndexLeafNode leafNode = new IndexLeafNode(this);
            setNewestLeafNode(leafNode);
            branchNode.putChildNode(leafNode);
        } else {
            // Restore size, id counter, root node and newest leaf from the config file
            // written by shutdown(). Read order must match the write order there.
            // NOTE(review): this stream is never closed — resource leak; wrap in
            // try/finally when this code is next touched.
            ObjectInputStream config = new ObjectInputStream(new FileInputStream(new File(dbDirectory + File.separator + CONFIGNAME)));
            setSize(config.readLong());
            setNodeIdCounter(config.readLong());
            // NOTE(review): guard level FINER vs. call level finest (mismatch), here and below.
            if (log.isLoggable(Level.FINER)) log.finest("read nodeIdCounter: " + getNodeIdCounter());
            long nodeId = config.readLong();
            if (log.isLoggable(Level.FINER)) log.finest("read root nodeId: " + nodeId);
            setRootNode((IndexBranchNode) loadNode(nodeId));
            nodeId = config.readLong();
            if (log.isLoggable(Level.FINER)) log.finest("read newest leaf nodeId: " + nodeId);
            setNewestLeafNode((IndexLeafNode) loadNode(nodeId));
        }
    }

    /**
     * Flushes all dirty nodes to the serializer, persists counters and root/newest-leaf
     * ids to the CONFIGNAME file, then waits for the serializer to drain.
     *
     * @throws IOException if writing the config file fails
     */
    public void shutdown() throws IOException {
        if (log.isLoggable(Level.INFO)) log.info("IndexManager shutting down");
        if (log.isLoggable(Level.INFO)) log.info("nodes loaded: " + nodeLoaded + "; direct: " + nodeLoadedDirect + ", from serializer: " + nodeLoadedSerializer + ", from cache: " + nodeLoadedCache + ", from disk: " + nodeLoadedDisk);
        // Write order must match the read order in the constructor's restore branch.
        ObjectOutputStream config = new ObjectOutputStream(new FileOutputStream(new File(dbDirectory + File.separator + CONFIGNAME)));
        config.writeLong(getSize());
        config.writeLong(getNodeIdCounter());
        config.writeLong(getRootNode().getNodeId());
        config.writeLong(getNewestLeafNode().getNodeId());
        config.close();

        if (log.isLoggable(Level.INFO)) log.info("IndexManager has " + getGeneralNodeCache().size() + " cached index nodes, " + getDirtyNodeCache().size() + " are dirty");
        // Iterate over a snapshot (copyToMap) so serialize() cannot disturb the iteration.
        for (Iterator i = getDirtyNodeCache().copyToMap().values().iterator(); i.hasNext(); ) {
            IndexNode indexNode = (IndexNode) i.next();
            serialize(indexNode);
        }
        // Root and newest leaf live outside the caches, flush them explicitly.
        IndexNode n = getRootNode();
        if (n.isDirty()) {
            serialize(n);
        }
        n.endInvoke();
        n = getNewestLeafNode();
        if (n.isDirty()) {
            serialize(n);
        }
        n.endInvoke();
        if (log.isLoggable(Level.INFO)) log.info("serializer has " + getNodeSerializer().size() + " nodes to serialize");
        getNodeSerializer().stopWhenReady();
        if (log.isLoggable(Level.INFO)) log.info("IndexManager has shut down");
    }

    /**
     * Fetches a node by id, checking (in order): root node, newest leaf, the caches,
     * the serializer queue, and finally disk. The returned node has been
     * {@code startInvoke()}d; the caller must balance it with {@code endInvoke()}.
     *
     * @param nodeId id of the node to fetch
     * @return the node, never null (throws from loadNode on read failure)
     */
    IndexNode getNode(long nodeId) {
        Long nodeIdLong = new Long(nodeId);
        nodeLoaded++;
        if (log.isLoggable(Level.FINER)) log.finer("getting node " + nodeId);
        IndexNode result = getRootNode();
        if (result.getNodeId() == nodeId) {
            if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " is the root node");
            nodeLoadedDirect++;
        } else {
            result.endInvoke();
            result = getNewestLeafNode();
            if (result.getNodeId() == nodeId) {
                if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " is the newest leaf node");
                nodeLoadedDirect++;
            } else {
                result.endInvoke();
                result = getNodeFromCaches(nodeIdLong);
                if (result != null) {
                    if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " was in some cache");
                    nodeLoadedCache++;
                    result.startInvoke();
                } else {
                    if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " was not in some cache");
                    // remove() reclaims the node from the pending-write queue...
                    result = (IndexNode) getNodeSerializer().remove(nodeIdLong);
                    if (result != null) {
                        if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " was in the nodeSerializer");
                        nodeLoadedSerializer++;
                        result.startInvoke();

                        // ...so it must be re-marked dirty and re-cached, or its
                        // unsaved changes would be lost.
                        result.setDirty();
                        putInCaches(result);
                    } else {
                        if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " was not in the nodeSerializer");
                        result = loadNode(nodeId);
                    }
                }
            }
        }
        return result;
    }

    /**
     * Reads a node from disk, converting IOException into OzoneInternalException
     * (callers of getNode cannot handle checked exceptions).
     */
    private IndexNode loadNode(long nodeId) {
        IndexNode result;
        try {
            result = IndexNode.read(this, nodeId);
            nodeLoadedDisk++;
        } catch (IOException e) {
            log.log(Level.SEVERE, "could not read index node " + nodeId, e);
            throw new OzoneInternalException("could not read index node " + nodeId, e);
        }
        return result;
    }

    /**
     * Throttles producers when the serializer backlog grows: sleeps 10 ms per node
     * over a backlog of 100 (e.g. 150 queued nodes -> 500 ms pause).
     */
    void checkSerializerSize() {
        int size = getNodeSerializer().size();
        int timeout = (size - 100) * 10;
        if (timeout > 0) {
            if (log.isLoggable(Level.INFO)) log.info("timeout needed because serializer contains " + size + " indexnodes");
            try {
                Thread.sleep(timeout);
            } catch (InterruptedException ignore) {
                // NOTE(review): interrupt status is swallowed here; consider
                // Thread.currentThread().interrupt() when next touched.
            }
        }
    }

    /**
     * Walks the tree from the root down to the leaf responsible for the given object id.
     * Intermediate branch nodes are released (endInvoke) as the walk descends; the
     * returned leaf is still pinned and must be endInvoke()d by the caller.
     */
    private IndexLeafNode getLeafNode(long objectId) {
        if (log.isLoggable(Level.FINER)) log.finer("getting leafnode for " + objectId);
        IndexLeafNode result = null;
        IndexBranchNode parentNode = getRootNode();
        do {
            if (log.isLoggable(Level.FINEST)) log.finest("current branchnode: " + parentNode.getNodeId());
            IndexNode childNode = getNode(parentNode.getChildNodeId(objectId));
            parentNode.endInvoke();
            if (childNode instanceof IndexLeafNode) {
                result = (IndexLeafNode) childNode;
            } else {
                parentNode = (IndexBranchNode) childNode;
            }
        } while (result == null);
        if (log.isLoggable(Level.FINER)) log.finer("found leafnode: " + result.getNodeId());
        return result;
    }

    /**
     * Looks up the container location for an object id.
     *
     * @param objectId the object to look up
     * @return the location as reported by the responsible leaf node
     */
    public synchronized ContainerLocation getContainerLocation(long objectId) {
        IndexLeafNode l = getLeafNode(objectId);
        try {
            ContainerLocation result = l.getContainerLocation(objectId);
            return result;
        } finally {
            l.endInvoke();
        }
    }

    /**
     * Inserts or updates the location of an object. Ids at or above the newest leaf's
     * minimum go straight into the newest leaf (the common append case); older ids
     * require a tree walk. {@link #size} is only bumped for genuinely new entries.
     */
    public synchronized void putContainerLocation(long objectId, ContainerLocation containerLocation) {
        IndexLeafNode leafNode;
        leafNode = getNewestLeafNode();
        if (objectId < leafNode.getMinObjectId()) {
            leafNode.endInvoke();
            leafNode = getLeafNode(objectId);
        }
        if (!leafNode.existsContainerLocation(objectId)) {
            size++;
        }
        leafNode.putContainerLocation(objectId, containerLocation);
        leafNode.endInvoke();
    }

    /**
     * Removes the location of an object and decrements {@link #size}.
     * NOTE(review): size is decremented unconditionally, while putContainerLocation
     * increments only for new entries — removing a non-existent id would skew the count;
     * confirm callers never do that.
     */
    public synchronized void removeContainerLocation(long objectId) {
        ContainerLocation containerLocation;
        IndexLeafNode leafNode = getLeafNode(objectId);
        leafNode.removeContainerLocation(objectId);
        leafNode.endInvoke();
        size--;
    }

    /** @return the number of container locations currently indexed */
    public long getSize() {
        return size;
    }

    /** Hands out the next unique node id (first call returns 0). */
    long nextNodeId() {
        return ++nodeIdCounter;
    }

    private long getNodeIdCounter() {
        return nodeIdCounter;
    }

    private void setNodeIdCounter(long nodeIdCounter) {
        this.nodeIdCounter = nodeIdCounter;
    }

    private void setSize(long size) {
        this.size = size;
    }

    int getMaxLeafNodeSize() {
        return maxLeafNodeSize;
    }

    /** @throws ConfigurationException if below {@link #MINMAXLEAFNODESIZE} */
    private void setMaxLeafNodeSize(int maxLeafNodeSize) {
        if (maxLeafNodeSize < MINMAXLEAFNODESIZE) {
            throw new ConfigurationException("max leaf node size must be >= " + MINMAXLEAFNODESIZE);
        }
        this.maxLeafNodeSize = maxLeafNodeSize;
    }

    int getMaxBranchNodeSize() {
        return maxBranchNodeSize;
    }

    /** @throws ConfigurationException if below {@link #MINMAXBRANCHNODESIZE} */
    private void setMaxBranchNodeSize(int maxBranchNodeSize) {
        if (maxBranchNodeSize < MINMAXBRANCHNODESIZE) {
            throw new ConfigurationException("max branch node size must be >= " + MINMAXBRANCHNODESIZE);
        }
        this.maxBranchNodeSize = maxBranchNodeSize;
    }

    // NOTE(review): the next three return hard-coded values even though related
    // PropertyInfos exist; presumably placeholders — confirm before wiring to properties.
    private float getLeafNodeMergeThreshhold() {
        return 0.5F;
    }

    private int getLeafNodeMergeDistance() {
        return 4;
    }

    private int getNodeCacheSize() {
        return 200;
    }

    /** Returns the (pinned) root node; caller must endInvoke() it. */
    private IndexBranchNode getRootNode() {
        rootNode.startInvoke();
        return rootNode;
    }

    /** Puts a node in the dirty or general cache depending on its dirty flag. */
    void putInCaches(IndexNode indexNode) {
        Long id = new Long(indexNode.getNodeId());
        if (indexNode.isDirty()) {
            getDirtyNodeCache().put(id, indexNode);
        } else {
            getGeneralNodeCache().put(id, indexNode);
        }
    }

    /** Evicts a node from all three cache tiers (e.g. when it is deleted). */
    void removeFromCaches(IndexNode indexNode) {
        Long id = new Long(indexNode.getNodeId());
        getBackupNodeCache().remove(id);
        getGeneralNodeCache().remove(id);
        getDirtyNodeCache().remove(id);
    }

    /**
     * Probes the cache tiers in order: general, dirty, backup. A backup-cache hit is
     * promoted back into the general cache (the weak reference could vanish any time).
     */
    private IndexNode getNodeFromCaches(Long id) {
        IndexNode result = (IndexNode) getGeneralNodeCache().get(id);
        if (result == null) {
            result = (IndexNode) getDirtyNodeCache().get(id);
            if (result == null) {

                result = (IndexNode) getBackupNodeCache().get(id);
                if (result != null) {

                    getGeneralNodeCache().put(id, result);
                }
            }
        }
        return result;
    }

    /** Installs a new root; the previous root (no longer specially held) returns to the caches. */
    void setRootNode(IndexBranchNode rootNode) {
        if (log.isLoggable(Level.FINE)) log.fine("old root node: " + this.rootNode);
        if (this.rootNode != null) {
            putInCaches(this.rootNode);
        }
        this.rootNode = rootNode;
        if (log.isLoggable(Level.FINE)) log.fine("new root node: " + this.rootNode);
    }

    /** Returns the (pinned) newest leaf; caller must endInvoke() it. */
    private IndexLeafNode getNewestLeafNode() {
        newestLeafNode.startInvoke();
        return newestLeafNode;
    }

    /** Installs a new newest leaf; the previous one returns to the caches. */
    void setNewestLeafNode(IndexLeafNode newestLeafNode) {
        if (this.newestLeafNode != null) {
            putInCaches(this.newestLeafNode);
        }
        this.newestLeafNode = newestLeafNode;
    }

    StorageFactory getStorageFactory() {
        return storageFactory;
    }

    /** May be null when no stream factory is configured (empty property). */
    StreamFactory getStreamFactory() {
        return streamFactory;
    }

    private void setStorageFactory(StorageFactory storageFactory) {
        this.storageFactory = storageFactory;
    }

    private void setStreamFactory(StreamFactory streamFactory) {
        this.streamFactory = streamFactory;
    }

    private TrimmingCache getDirtyNodeCache() {
        return dirtyNodeCache;
    }

    private void setDirtyNodeCache(TrimmingCache dirtyNodeCache) {
        this.dirtyNodeCache = dirtyNodeCache;
    }

    private TrimmingCache getGeneralNodeCache() {
        return generalNodeCache;
    }

    private void setGeneralNodeCache(TrimmingCache generalNodeCache) {
        this.generalNodeCache = generalNodeCache;
    }

    private WeakReferenceCache getBackupNodeCache() {
        return backupNodeCache;
    }

    /**
     * Hands a node to the background serializer. The dirty flag is cleared FIRST:
     * from this point on, the serializer queue is the authoritative holder of the
     * node's unsaved state (see the re-dirtying logic in getNode()).
     */
    synchronized void serialize(IndexNode indexNode) {
        if (log.isLoggable(Level.FINER)) log.finer("putting indexnode " + indexNode.getNodeId() + " into serializer");
        indexNode.setDirty(false);
        getNodeSerializer().put(new Long(indexNode.getNodeId()), indexNode);
    }

    Serializer getNodeSerializer() {
        return nodeSerializer;
    }

    Deleter getDeleter() {
        return deleter;
    }

    private void setNodeSerializer(Serializer nodeSerializer) {
        this.nodeSerializer = nodeSerializer;
    }

    /**
     * {@inheritDoc}
     * NOTE(review): BRANCHNODEMERGEREACH/SIZE, LEAFNODEMERGEREACH/SIZE and
     * INDEXNODESTREAMFACTORY are declared above but not listed here — confirm
     * whether the omission is intentional.
     */
    public Collection getPropertyInfos() {
        Collection result = new LinkedList();
        result.add(INDEXSTREAMFACTORY);
        result.add(GENERALINDEXNODECACHE);
        result.add(DIRTYINDEXNODECACHE);
        result.add(MAXBRANCHNODESIZE);
        result.add(MAXLEAFNODESIZE);
        result.add(INDEXNODESTORAGEFACTORY);
        return result;
    }

    int getLeafNodeMergeReach() {
        return leafNodeMergeReach;
    }

    private void setLeafNodeMergeReach(int leafNodeMergeReach) {
        this.leafNodeMergeReach = leafNodeMergeReach;
    }

    int getBranchNodeMergeSize() {
        return branchNodeMergeSize;
    }

    private void setBranchNodeMergeSize(int branchNodeMergeSize) {
        this.branchNodeMergeSize = branchNodeMergeSize;
    }

    int getBranchNodeMergeReach() {
        return branchNodeMergeReach;
    }

    private void setBranchNodeMergeReach(int branchNodeMergeReach) {
        this.branchNodeMergeReach = branchNodeMergeReach;
    }

    /** Called by nodes when they transition to dirty: promotes them into the dirty cache. */
    void nodeBecameDirty(IndexNode indexNode) {
        getDirtyNodeCache().put(new Long(indexNode.getNodeId()), indexNode);
    }

    /** @return the property-key prefix this component was created with */
    public String getPrefix() {
        return prefix;
    }

}