// You can redistribute this software and/or modify it under the terms of
// the Ozone Core License version 1 published by ozone-db.org.
//
// Copyright (C) 2003-@year@, Leo Mekenkamp. All rights reserved.
//
// $Id: IndexManager.java,v 1.8 2004/04/06 18:39:39 leomekenkamp Exp $

package org.ozoneDB.core.storage.gammaStore;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.ozoneDB.OzoneInternalException;
import org.ozoneDB.core.ConfigurationException;
import org.ozoneDB.core.ServerComponent;
import org.ozoneDB.core.storage.Cache;
import org.ozoneDB.core.storage.PropertyConfigurable;
import org.ozoneDB.core.storage.PropertyConfigurableFactory;
import org.ozoneDB.core.storage.PropertyInfo;
import org.ozoneDB.core.storage.TrimmingCache;
import org.ozoneDB.core.storage.WeakReferenceCache;

/**
 * Takes care of storing and caching all object locations. Because of the nature
 * of generated object ids (a newly generated id is always one bigger than the
 * id generated before) this class is very fast when adding new ids, but a bit
 * slow when inserting ids in random order. Random order insertion will only
 * take place during a crash recovery.
 *
 * @author <a HREF="mailto:leoATmekenkampD0Tcom">Leo Mekenkamp (mind the anti sp@m)</a>
 * @version $Id: IndexManager.java,v 1.8 2004/04/06 18:39:39 leomekenkamp Exp $
 */

public final class IndexManager implements PropertyConfigurable {
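
    /*
     * Illustrative usage sketch (added for clarity; not part of the original
     * source). It relies only on the public API declared in this class; the
     * property values, the "gammaStore" prefix and the variable names are
     * assumptions.
     *
     *   Properties props = ...;   // gammaStore configuration properties
     *   IndexManager index = new IndexManager(props, "gammaStore", true);   // throws IOException
     *   index.putContainerLocation(objectId, location);          // ids normally arrive in ascending order
     *   ContainerLocation found = index.getContainerLocation(objectId);
     *   index.removeContainerLocation(objectId);
     *   index.shutdown();         // persists size, id counter and root/newest-leaf node ids
     */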

    private static Logger log = Logger.getLogger(IndexManager.class.getName());

    public static final int MINMAXBRANCHNODESIZE = 3;

    public static final int MINMAXLEAFNODESIZE = 3;

    public static final PropertyInfo INDEXSTREAMFACTORY = new PropertyInfo(
        ".indexStreamFactory",
        "String (classname)",
        null,
        "factory to use to insert extra streams while (de)serializing index nodes",
        new String[] {
            "org.ozoneDB.core.gammaStore.ZipStreamFactory",
            "org.ozoneDB.core.gammaStore.GZIPStreamFactory",
        }
    );

    public static final PropertyInfo DIRTYINDEXNODECACHE = new PropertyInfo(
        ".dirtyIndexNodeCache",
        "String (classname, must implement org.ozoneDB.core.storage.TrimmingCache)",
        null,
        "cache for caching index nodes that have to be written to disk",
        new String[] {"org.ozoneDB.core.gammaStore.FixedSizeDelayCache"}
    );

    public static final PropertyInfo GENERALINDEXNODECACHE = new PropertyInfo(
        ".generalIndexNodeCache",
        "String (classname, must implement org.ozoneDB.core.storage.TrimmingCache)",
        null,
        "cache for caching index nodes, both changed and unchanged",
        new String[] {"org.ozoneDB.core.gammaStore.FixedSizeCache"}
    );

    public static final PropertyInfo MAXBRANCHNODESIZE = new PropertyInfo(
        ".maxBranchNodeSize",
        "int",
        "474",
        "Maximum number of index nodes in a branch node. Raw size of a branch node " +
        "(serialized, without compression) is x + 537 + n * 16 with x = 0 for " +
        "n <= 32, x = 4 for 32 < n <= 64, x = 8 for 64 < n <= 128, etc., " +
        "where n is the maximum number of index nodes.",
        new String[] {
            "93 (files just under 2K)",
            "226 (files just under 4K)",
            "474 (files just under 8K)",
            "982 (files just under 16K)",
            "1998 (files just under 32K)",
        }
    );
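
    // Worked example for the branch size formula above (added; not in the original source):
    // with the default maxBranchNodeSize n = 474 the overhead term is x = 16
    // (since 256 < 474 <= 512), so the raw serialized size is
    // 16 + 537 + 474 * 16 = 8137 bytes, i.e. just under the 8K (8192 byte) mark
    // quoted in the example values.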

    public static final PropertyInfo BRANCHNODEMERGEREACH = new PropertyInfo(
        ".branchNodeMergeReach",
        "int",
        "2",
        "When a branch node becomes too small and wants to merge with other " +
        "branches, this value determines how 'far' a node should look for other " +
        "nodes to merge with. Say we have branch nodes 1 to 10 and 5 wants to " +
        "merge and this value is 2, then it will try to merge with 3, 4, 6 and 7.",
        new String[] { "1", "2" }
    );

    public static final PropertyInfo BRANCHNODEMERGESIZE = new PropertyInfo(
        ".branchNodeMergeSize",
        "int",
        "300",
        "When a child node is removed from a branch and the size of this branch " +
        "is below this value, then the branch tries to merge with other branches. " +
        "Must be smaller than " + MAXBRANCHNODESIZE.getKey() + ". See also " +
        BRANCHNODEMERGEREACH.getKey() + ".",
        new String[] { "1", "100", "250" }
    );

    public static final PropertyInfo MAXLEAFNODESIZE = new PropertyInfo(
        ".maxLeafNodeSize",
        "int",
        "100",
        "Maximum number of locations in a leaf node. Raw size of a leaf node " +
        "(serialized, without compression) is x + 598 + n * 20 with x = 0 for " +
        "n <= 32, x = 4 for 32 < n <= 64, x = 8 for 64 < n <= 128, etc., " +
        "where n is the maximum number of locations.",
        new String[] {
            "72 (files just under 2K)",
            "179 (files just under 4K)",
            "377 (files just under 8K)",
            "784 (files just under 16K)",
            "1598 (files just under 32K)",
        }
    );
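
    // Worked example for the leaf size formula above (added; not in the original source):
    // with the default maxLeafNodeSize n = 100 the overhead term is x = 8
    // (since 64 < 100 <= 128), giving 8 + 598 + 100 * 20 = 2606 bytes; the
    // "377 (files just under 8K)" example works out to 16 + 598 + 377 * 20 = 8154
    // bytes, just under 8192.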

    public static final PropertyInfo LEAFNODEMERGEREACH = new PropertyInfo(
        ".leafNodeMergeReach",
        "int",
        "2",
        "When a leaf node becomes too small and wants to merge with other " +
        "leaves, this value determines how 'far' a node should look for other " +
        "nodes to merge with. Say we have leaf nodes 1 to 10 and 5 wants to " +
        "merge and this value is 2, then it will try to merge with 3, 4, 6 and 7.",
        new String[] { "1", "2" }
    );

    public static final PropertyInfo LEAFNODEMERGESIZE = new PropertyInfo(
        ".leafNodeMergeSize",
        "int",
        "250",
        "When a container location is removed from a leaf and the size of this leaf " +
        "is below this value, then the leaf tries to merge with other leaves. " +
        "Must be smaller than " + MAXLEAFNODESIZE.getKey() + ". See also " +
        LEAFNODEMERGEREACH.getKey() + ".",
        new String[] { "10", "100" }
    );

    public static final PropertyInfo INDEXNODESTORAGEFACTORY = new PropertyInfo(
        ".indexNodeStorageFactory",
        "String (classname)",
        null,
        "factory to use to create Storage instances for reading/writing index nodes",
        new String[] {"org.ozoneDB.code.gammaStore.FileStreamStorageFactory"}
    );

    public static final PropertyInfo INDEXNODESTREAMFACTORY = new PropertyInfo(
        ".indexNodeStreamFactory",
        "String (classname)",
        "",
        "factory to use to create java.io.[In|Out]putStream instances that are " +
        "\"plugged in\" during the (de)serializing of index nodes",
        new String[] {"org.ozoneDB.code.gammaStore.ZipStreamStorageFactory"}
    );

    /**
     * (file)name to store / read configuration at shutdown / startup
     */
    private static final String CONFIGNAME = "indexmanager";

    /**
     * provides fast access for when new objects are created
     */
    private IndexLeafNode newestLeafNode;

    /**
     * entry point for the tree containing all index nodes
     */
    private IndexBranchNode rootNode;

    /**
     * for generating unique ids; -1 so the first returned id will be 0
     */
    private long nodeIdCounter = -1;

    /**
     * factory needed for swapping index nodes in and out
     */
    private StorageFactory storageFactory;

    /**
     * factory needed for extra streams during (de)serialization
     */
    private StreamFactory streamFactory;

    /**
     * holds all dirty nodes ("You dirty little nodes, you...")
     */
    private TrimmingCache dirtyNodeCache;

    /**
     * holds most index nodes
     */
    private TrimmingCache generalNodeCache;

    /**
     * makes sure we have access to nodes that have been thrown out of the other
     * caches but are still strongly referenced somewhere
     */
    private WeakReferenceCache backupNodeCache;

    private int maxLeafNodeSize;

    private int leafNodeMergeSize;

    private int leafNodeMergeReach;

    private int maxBranchNodeSize;

    private int branchNodeMergeSize;

    private int branchNodeMergeReach;

    private Serializer nodeSerializer;

    /**
     * takes care of deleting index nodes that have become empty and have
     * been removed from the indexmanager
     */
    private Deleter deleter;

    private transient long nodeLoaded;

    private transient long nodeLoadedDirect;

    private transient long nodeLoadedSerializer;

    private transient long nodeLoadedCache;

    private transient long nodeLoadedDisk;

    private String dbDirectory;

    private long size;

    private String prefix;

    /**
     * @param properties configuration properties to read settings from
     * @param prefix prefix prepended to the property keys used by this index manager
     * @param initialize if <code>true</code>, performs a full (re)initialization;
     * if <code>false</code>, tries to read the stored configuration.
     * TODO: refactor with the new java.util.concurrent package in the 1.5 JDK
     * to make fully reentrant
     */

    public IndexManager(Properties properties, String prefix, boolean initialize) throws IOException {

        this.prefix = prefix;
        backupNodeCache = new WeakReferenceCache();

        setStorageFactory((StorageFactory) PropertyConfigurableFactory.create(StorageFactory.class, properties, getPrefix() + INDEXNODESTORAGEFACTORY.getKey()));
        String streamFactoryClassname = properties.getProperty(getPrefix() + INDEXNODESTREAMFACTORY.getKey(), INDEXNODESTREAMFACTORY.getDefaultValue());
        if (streamFactoryClassname.length() > 0) {
            setStreamFactory((StreamFactory) PropertyConfigurableFactory.create(StreamFactory.class, properties, getPrefix() + INDEXNODESTREAMFACTORY.getKey()));
        }
        setNodeSerializer(new Serializer(getStorageFactory(), getStreamFactory(), "NodeSerializer"));

        deleter = new Deleter("node storage deleter");

        setDirtyNodeCache((TrimmingCache) PropertyConfigurableFactory.create(Cache.class, properties, getPrefix() + DIRTYINDEXNODECACHE.getKey()));
        getDirtyNodeCache().setSynchronizer(this);
        getDirtyNodeCache().setTrimHandler(new TrimmingCache.TrimHandler() {
            public void trimming(Object key, Object value) {
                IndexNode indexNode = (IndexNode) value;
                if (log.isLoggable(Level.FINE)) log.fine("indexnode " + indexNode.getNodeId() + " is trimmed from dirty cache, going to serializer");
                getGeneralNodeCache().put(new Long(indexNode.getNodeId()), indexNode);
                serialize(indexNode);
                if (log.isLoggable(Level.FINER)) log.finer("put in serializer: " + indexNode.getNodeId());
            }
        });

        setGeneralNodeCache((TrimmingCache) PropertyConfigurableFactory.create(Cache.class, properties, getPrefix() + GENERALINDEXNODECACHE.getKey()));
        getGeneralNodeCache().setSynchronizer(this);
        getGeneralNodeCache().setTrimHandler(new TrimmingCache.TrimHandler() {
            public void trimming(Object key, Object value) {
                IndexNode indexNode = (IndexNode) value;
                if (indexNode.isDirty() && getDirtyNodeCache().get(new Long(indexNode.getNodeId())) == null) {
                    IndexNode serializing = (IndexNode) getNodeSerializer().remove(new Long(indexNode.getNodeId()));
                    if (serializing == null) {
                        log.severe("WTF? not in dirty and not serializing? " + indexNode.getNodeId());
                    }
                }
                getBackupNodeCache().put(new Long(indexNode.getNodeId()), indexNode);
                if (log.isLoggable(Level.FINE)) log.fine("indexnode " + indexNode.getNodeId() + " is trimmed from general cache, going to backup cache");
            }
        });

        try {
            String num = properties.getProperty(MAXBRANCHNODESIZE.getKey(), MAXBRANCHNODESIZE.getDefaultValue());
            setMaxBranchNodeSize(Integer.parseInt(num));
            num = properties.getProperty(MAXLEAFNODESIZE.getKey(), MAXLEAFNODESIZE.getDefaultValue());
            setMaxLeafNodeSize(Integer.parseInt(num));
        } catch (NumberFormatException e) {
            throw new ConfigurationException(e);
        }
        dbDirectory = properties.getProperty(GammaStore.DIRECTORY.getKey());
        if (initialize) {
            log.info("deleting all files in index directory");
            getStorageFactory().deleteAll();

            // always start with one branch node and one leaf node; that way we
            // can assume that rootNode as well as newestLeafNode are both never
            // null.
            IndexBranchNode branchNode = new IndexBranchNode(this);
            setRootNode(branchNode);
            IndexLeafNode leafNode = new IndexLeafNode(this);
            setNewestLeafNode(leafNode);
            branchNode.putChildNode(leafNode);
        } else {
            ObjectInputStream config = new ObjectInputStream(new FileInputStream(new File(dbDirectory + File.separator + CONFIGNAME)));
            setSize(config.readLong());
            setNodeIdCounter(config.readLong());
            if (log.isLoggable(Level.FINER)) log.finest("read nodeIdCounter: " + getNodeIdCounter());
            long nodeId = config.readLong();
            if (log.isLoggable(Level.FINER)) log.finest("read root nodeId: " + nodeId);
            setRootNode((IndexBranchNode) loadNode(nodeId));
            nodeId = config.readLong();
            if (log.isLoggable(Level.FINER)) log.finest("read newest leaf nodeId: " + nodeId);
            setNewestLeafNode((IndexLeafNode) loadNode(nodeId));
            // close the stored configuration once everything has been read
            config.close();
        }
    }

    /**
     * Shuts the indexmanager down; writes all data needed by the constructor
     * when its <code>initialize</code> parameter is <code>false</code>.
     */
    public void shutdown() throws IOException {
        if (log.isLoggable(Level.INFO)) log.info("IndexManager shutting down");
        if (log.isLoggable(Level.INFO)) log.info("nodes loaded: " + nodeLoaded + "; direct: " + nodeLoadedDirect + ", from serializer: " + nodeLoadedSerializer + ", from cache: " + nodeLoadedCache + ", from disk: " + nodeLoadedDisk);
        ObjectOutputStream config = new ObjectOutputStream(new FileOutputStream(new File(dbDirectory + File.separator + CONFIGNAME)));
        // Storage config = getStorageFactory().createStorage(CONFIGNAME);
        config.writeLong(getSize());
        config.writeLong(getNodeIdCounter());
        config.writeLong(getRootNode().getNodeId());
        config.writeLong(getNewestLeafNode().getNodeId());
        config.close();

        if (log.isLoggable(Level.INFO)) log.info("IndexManager has " + getGeneralNodeCache().size() + " cached index nodes, " + getDirtyNodeCache().size() + " are dirty");
        for (Iterator i = getDirtyNodeCache().copyToMap().values().iterator(); i.hasNext(); ) {
            IndexNode indexNode = (IndexNode) i.next();
            serialize(indexNode);
        }
        IndexNode n = getRootNode();
        if (n.isDirty()) {
            serialize(n);
        }
        n.endInvoke();
        n = getNewestLeafNode();
        if (n.isDirty()) {
            serialize(n);
        }
        n.endInvoke();
        if (log.isLoggable(Level.INFO)) log.info("serializer has " + getNodeSerializer().size() + " nodes to serialize");
        getNodeSerializer().stopWhenReady();
        if (log.isLoggable(Level.INFO)) log.info("IndexManager has shut down");
    }

    /**
     * Returns the specified index node from the cache, or from disk. The node
     * places itself in the node cache. Returned node is invoked.
     *
     * @param nodeId the id of the node
     * @return IndexNode node containing the specified node, or <code>null</code>
     * when there is no such node
     */
    IndexNode getNode(long nodeId) {
        Long nodeIdLong = new Long(nodeId);
        nodeLoaded++;
        if (log.isLoggable(Level.FINER)) log.finer("getting node " + nodeId);
        IndexNode result = getRootNode();
        if (result.getNodeId() == nodeId) {
            if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " is the root node");
            nodeLoadedDirect++;
        } else {
            result.endInvoke();
            result = getNewestLeafNode();
            if (result.getNodeId() == nodeId) {
                if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " is the newest leaf node");
                nodeLoadedDirect++;
            } else {
                result.endInvoke();
                result = getNodeFromCaches(nodeIdLong);
                if (result != null) {
                    //if (nodeId == 6) log.severe(nodeId + " was in the cache");
                    if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " was in some cache");
                    nodeLoadedCache++;
                    result.startInvoke();
                } else {
                    //if (nodeId == 6) log.severe(nodeId + " was NOT in the cache");
                    if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " was not in some cache");
                    result = (IndexNode) getNodeSerializer().remove(nodeIdLong);
                    if (result != null) {
                        //if (nodeId == 6) log.severe(nodeId + " was in the nodeSerializer");
                        if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " was in the nodeSerializer");
                        nodeLoadedSerializer++;
                        result.startInvoke();

                        // Since a node is set to 'not dirty' when it is put
                        // into the serializer, we must now set it back to dirty
                        // again. It might be that the node has just been
                        // written and thus should not have to be set dirty
                        // again, but in that case we just write it twice,
                        // rather than risk skipping a write.
                        result.setDirty();
                        putInCaches(result);
                    } else {
                        //if (nodeId == 6) log.severe(nodeId + " was NOT in the nodeSerializer");
                        if (log.isLoggable(Level.FINEST)) log.finest(nodeId + " was not in the nodeSerializer");
                        result = loadNode(nodeId);
                    }
                }
            }
        }
        return result;
    }

    /**
     * Loads an indexnode from disk.
     * @throws OzoneInternalException if error during reading
     */
    private IndexNode loadNode(long nodeId) {
        IndexNode result;
        try {
            result = IndexNode.read(this, nodeId);
            nodeLoadedDisk++;
        } catch (IOException e) {
            log.log(Level.SEVERE, "could not read index node " + nodeId, e);
            throw new OzoneInternalException("could not read index node " + nodeId, e);
        }
        return result;
    }

    /**
     * Checks the size of the serializer and suspends (sleeps) the current thread
     * if the serializer has too many nodes queued. This prevents an out of memory
     * condition caused by nodes that are waiting to be written to disk but are
     * still held in memory; such nodes are being swapped out precisely because
     * memory is (probably) already low.
     */
    void checkSerializerSize() {
        int size = getNodeSerializer().size();
        int timeout = (size - 100) * 10; // TODO: config params
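        // Added note (not in the original source): the throttle kicks in once more
        // than 100 nodes are queued; e.g. with 150 queued nodes the calling thread
        // sleeps (150 - 100) * 10 = 500 ms before continuing.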
        if (timeout > 0) {
            if (log.isLoggable(Level.INFO)) log.info("timeout needed because serializer contains " + size + " indexnodes");
            try {
                Thread.sleep(timeout);
            } catch (InterruptedException ignore) {
            }
        }
    }
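
    /**
     * Walks down the index tree from the root, following branch nodes, until the
     * leaf node responsible for the given object id is found. The returned leaf
     * node is invoked; callers must call <code>endInvoke()</code> on it when done.
     */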
    private IndexLeafNode getLeafNode(long objectId) {
        if (log.isLoggable(Level.FINER)) log.finer("getting leafnode for " + objectId);
        IndexLeafNode result = null;
        IndexBranchNode parentNode = getRootNode();
        do {
            if (log.isLoggable(Level.FINEST)) log.finest("current branchnode: " + parentNode.getNodeId());
            IndexNode childNode = getNode(parentNode.getChildNodeId(objectId));
            parentNode.endInvoke();
            if (childNode instanceof IndexLeafNode) {
                result = (IndexLeafNode) childNode;
            } else {
                parentNode = (IndexBranchNode) childNode;
            }
        } while (result == null);
        if (log.isLoggable(Level.FINER)) log.finer("found leafnode: " + result.getNodeId());
        return result;
    }

    /**
     * @param objectId id of the object for which the location is returned
     * @return ContainerLocation location for the specified object
     * @throws ObjectNotFoundException if no container location can be found
     * for the specified objectId
     */
    public synchronized ContainerLocation getContainerLocation(long objectId) {
        IndexLeafNode l = getLeafNode(objectId);
        try {
            ContainerLocation result = l.getContainerLocation(objectId);
            return result;
        } finally {
            l.endInvoke();
        }
    }

    /**
     * Stores the location for the given object id. New (highest) ids go straight
     * into the newest leaf node; older ids are routed through the tree to the
     * leaf node that covers them.
     */
    public synchronized void putContainerLocation(long objectId, ContainerLocation containerLocation) {
        IndexLeafNode leafNode;
        leafNode = getNewestLeafNode();
        if (objectId < leafNode.getMinObjectId()) {
            leafNode.endInvoke();
            leafNode = getLeafNode(objectId);
        }
        if (!leafNode.existsContainerLocation(objectId)) {
            size++;
        }
        leafNode.putContainerLocation(objectId, containerLocation);
        leafNode.endInvoke();
    }

    public synchronized void removeContainerLocation(long objectId) {
        IndexLeafNode leafNode = getLeafNode(objectId);
        leafNode.removeContainerLocation(objectId);
        leafNode.endInvoke();
        size--;
    }

    /**
     * Returns the number of container locations in this instance.
     */
    public long getSize() {
        return size;
    }

    long nextNodeId() {
        return ++nodeIdCounter;
    }

    private long getNodeIdCounter() {
        return nodeIdCounter;
    }

    private void setNodeIdCounter(long nodeIdCounter) {
        this.nodeIdCounter = nodeIdCounter;
    }

    private void setSize(long size) {
        this.size = size;
    }

    int getMaxLeafNodeSize() {
        return maxLeafNodeSize; // TODO: config param
    }

    private void setMaxLeafNodeSize(int maxLeafNodeSize) {
        if (maxLeafNodeSize < MINMAXLEAFNODESIZE) {
            throw new ConfigurationException("max leaf node size must be >= " + MINMAXLEAFNODESIZE);
        }
        this.maxLeafNodeSize = maxLeafNodeSize;
    }

    int getMaxBranchNodeSize() {
        return maxBranchNodeSize; // TODO: config param
    }

    private void setMaxBranchNodeSize(int maxBranchNodeSize) {
        if (maxBranchNodeSize < MINMAXBRANCHNODESIZE) {
            throw new ConfigurationException("max branch node size must be >= " + MINMAXBRANCHNODESIZE);
        }
        this.maxBranchNodeSize = maxBranchNodeSize;
    }

    private float getLeafNodeMergeThreshhold() {
        return 0.5F; // TODO: config param
    }

    private int getLeafNodeMergeDistance() {
        return 4; // TODO: config param
    }

    private int getNodeCacheSize() {
        return 200; // TODO: config param
    }

    private IndexBranchNode getRootNode() {
        rootNode.startInvoke();
        return rootNode;
    }

    void putInCaches(IndexNode indexNode) {
        Long id = new Long(indexNode.getNodeId());
        // for performance reasons we do not put the node into the backup cache
        // this will happen when trimmed from general cache
        if (indexNode.isDirty()) {
            getDirtyNodeCache().put(id, indexNode);
        } else {
            getGeneralNodeCache().put(id, indexNode);
        }
    }

    void removeFromCaches(IndexNode indexNode) {
        Long id = new Long(indexNode.getNodeId());
        getBackupNodeCache().remove(id);
        getGeneralNodeCache().remove(id);
        getDirtyNodeCache().remove(id);
    }

    private IndexNode getNodeFromCaches(Long id) {
        IndexNode result = (IndexNode) getGeneralNodeCache().get(id);
        if (result == null) {
            result = (IndexNode) getDirtyNodeCache().get(id);
            if (result == null) {

                // Node may be thrown out of general cache, but may be dirty and
                // still in dirty cache. "Money talks, money talks. Dirty cash, I
                // want you, dirty cash I need you, woo-ooh."
                // In any case, if it is not in the backup cache, it has to be
                // re-read from storage
                result = (IndexNode) getBackupNodeCache().get(id);
                if (result != null) {

                    // since the node was only found in the backup cache, we now
                    // put it back into the general cache, since it may be
                    // needed again soon
                    getGeneralNodeCache().put(id, result);
                }
            }
        }
        return result;
    }

    void setRootNode(IndexBranchNode rootNode) {
        if (log.isLoggable(Level.FINE)) log.fine("old root node: " + this.rootNode);
        if (this.rootNode != null) {
            putInCaches(this.rootNode);
        }
        this.rootNode = rootNode;
        if (log.isLoggable(Level.FINE)) log.fine("new root node: " + this.rootNode);
    }

    private IndexLeafNode getNewestLeafNode() {
        newestLeafNode.startInvoke();
        return newestLeafNode;
    }

    void setNewestLeafNode(IndexLeafNode newestLeafNode) {
        if (this.newestLeafNode != null) {
            putInCaches(this.newestLeafNode);
        }
        this.newestLeafNode = newestLeafNode;
    }

    StorageFactory getStorageFactory() {
        return storageFactory;
    }

    StreamFactory getStreamFactory() {
        return streamFactory;
    }

    private void setStorageFactory(StorageFactory storageFactory) {
        this.storageFactory = storageFactory;
    }

    private void setStreamFactory(StreamFactory streamFactory) {
        this.streamFactory = streamFactory;
    }

    private TrimmingCache getDirtyNodeCache() {
        return dirtyNodeCache;
    }

    private void setDirtyNodeCache(TrimmingCache dirtyNodeCache) {
        this.dirtyNodeCache = dirtyNodeCache;
    }

    /**
     * Keep in mind that dirty nodes may also be in this cache.
     */
    private TrimmingCache getGeneralNodeCache() {
        return generalNodeCache;
    }

    private void setGeneralNodeCache(TrimmingCache generalNodeCache) {
        this.generalNodeCache = generalNodeCache;
    }

    private WeakReferenceCache getBackupNodeCache() {
        return backupNodeCache;
    }

    synchronized void serialize(IndexNode indexNode) {
        if (log.isLoggable(Level.FINER)) log.finer("putting indexnode " + indexNode.getNodeId() + " into serializer");
        // Note that we MUST put dirty back to true if we take the node out of the serializer!!!
        indexNode.setDirty(false);
        getNodeSerializer().put(new Long(indexNode.getNodeId()), indexNode);
    }

    Serializer getNodeSerializer() {
        return nodeSerializer;
    }

    Deleter getDeleter() {
        return deleter;
    }

    private void setNodeSerializer(Serializer nodeSerializer) {
        this.nodeSerializer = nodeSerializer;
    }

    public Collection getPropertyInfos() {
        Collection result = new LinkedList();
        result.add(INDEXSTREAMFACTORY);
        result.add(GENERALINDEXNODECACHE);
        result.add(DIRTYINDEXNODECACHE);
        result.add(MAXBRANCHNODESIZE);
        result.add(MAXLEAFNODESIZE);
        result.add(INDEXNODESTORAGEFACTORY);
        return result;
    }

    int getLeafNodeMergeReach() {
        return leafNodeMergeReach;
    }

    private void setLeafNodeMergeReach(int leafNodeMergeReach) {
        this.leafNodeMergeReach = leafNodeMergeReach;
    }

    int getBranchNodeMergeSize() {
        return branchNodeMergeSize;
    }

    private void setBranchNodeMergeSize(int branchNodeMergeSize) {
        this.branchNodeMergeSize = branchNodeMergeSize;
    }

    int getBranchNodeMergeReach() {
        return branchNodeMergeReach;
    }

    private void setBranchNodeMergeReach(int branchNodeMergeReach) {
        this.branchNodeMergeReach = branchNodeMergeReach;
    }

    void nodeBecameDirty(IndexNode indexNode) {
        getDirtyNodeCache().put(new Long(indexNode.getNodeId()), indexNode);
    }

    public String getPrefix() {
        return prefix;
    }

}