// You can redistribute this software and/or modify it under the terms of
// the Ozone Library License version 1 published by ozone-db.org.
//
// The original code and portions created by SMB are
// Copyright (C) 1997-@year@ by SMB GmbH. All rights reserved.
//
// $Id: DxDiskHashMap.java,v 1.16 2004/01/27 21:51:54 wieslawf Exp $

package org.ozoneDB.DxLib;

import org.ozoneDB.io.stream.ResolvingObjectInputStream;
import java.io.*;
import java.util.Arrays;


/**
 * @author <a HREF="http://www.softwarebuero.de/">SMB</a>
 * @author <a HREF="http://www.medium.net/">Medium.net</a>
 * @version $Revision: 1.16 $Date: 2004/01/27 21:51:54 $
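 *
 * <p>A minimal usage sketch (argument values and variable names are illustrative only):</p>
 * <pre>
 *     DxDiskHashMap map = new DxDiskHashMap( "/tmp/mymap", 10, 10, 12 );
 *     map.addForKey( someValue, someKey );
 *     Object found = map.elementForKey( someKey );
 *     map.close();   // throws Exception; writes all tables and marks the map reusable
 * </pre>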
 */
public class DxDiskHashMap extends DxAbstractMap {
    
    final static long serialVersionUID = 2;
    
    public final static String ROOT_TABLE_NAME = ".rootTable";
    
    private String baseFileName;
    
    /** The directory where all tables reside. */
    protected File tableDirectory;
    
    private long subTableNameCount = System.currentTimeMillis();
    private DxDiskSubTable rootTable;
    private int itemCount = 0;
    private int[] tableBitSizes = new int[] {8, 8, 8, 8};
    
    // cache related stuff
    private int cacheBits = 10;
    private int cacheMask;
    private DxKeyData[] cache;
    
    // buffer related stuff
    protected int maxBufferSize = 10;
    protected DxSet buffer;
    
    public int bufferAccesses;
    public int bufferHits;
    public int cacheAccesses;
    public int cacheHits;
    
    // old tables hashMask bit shift
    private int oldTablesHashMaskShift;
    
    public DxDiskHashMap( String _baseFileName, int _maxBufferSize, int _cacheBits, int[] _tableBitSizes ) {
        if (_tableBitSizes != null) {
            if (_tableBitSizes.length == 1) {
                if (_tableBitSizes[0] < 8 || _tableBitSizes[0] > 16) {
                    throw new RuntimeException( "Illegal tableBitSize value; it must be between 8 and 16." );
                }
                int tbs = _tableBitSizes[0];
                int len = 32 / tbs + (32 % tbs > 0 ? 1 : 0);
                tableBitSizes = new int[len];
                Arrays.fill( tableBitSizes, tbs );
                if (32 % tbs > 0) {
                    tableBitSizes[tableBitSizes.length-1] = 32 % tbs;
                }
            } else {
                int size = 0;
                for (int i = 0; i < _tableBitSizes.length; i++) {
                    size += _tableBitSizes[i];
                }
                if (size != 32) {
                    throw new RuntimeException( "Illegal tableBitSize values; the bit sizes must sum to 32." );
                }
                tableBitSizes = _tableBitSizes;
            }
        }
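        // Worked example (illustrative): a single bit size of 12 yields
        // len = 32/12 + 1 = 3 and tableBitSizes = {12, 12, 8}; the table
        // levels together always consume all 32 bits of the hash code.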
        
        baseFileName = _baseFileName;
        maxBufferSize = _maxBufferSize;
        
        int index = baseFileName.lastIndexOf(File.separatorChar);
        
        if (index != -1) {
            tableDirectory = new File(baseFileName.substring(0, index) );
        } else {
            tableDirectory = new File(System.getProperty("user.dir","."));
        }
        
        cacheBits = _cacheBits;
        cacheMask = (1 << cacheBits) - 1;
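        // Note: the key cache is a simple direct-mapped slot array of size
        // 1 << cacheBits (e.g. cacheBits = 10 gives 1024 slots); a key's slot
        // is hashCode & cacheMask, a colliding key simply overwrites the slot,
        // and passing cacheBits = 0 effectively disables the cache.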
        
        clear();
    }
    
    
    public DxDiskHashMap( String _baseFileName, int _maxBufferSize, int _cacheBits, int _tableBitSize ) {
        this( _baseFileName, _maxBufferSize, _cacheBits, new int[] {_tableBitSize} );
    }
    
    
    public synchronized void clear() {
        cache = new DxKeyData[1 << cacheBits];
        
        rootTable = new DxDiskSubTable( this, 0 );
        buffer = new DxHashSet();
        buffer.add( rootTable );
    }
    
    
    public DxDiskSubTable rootTable() {
        return this.rootTable;
    }
    
    
    public DxDiskHashNodeLeaf newNodeLeaf() {
        return new DxDiskHashNodeLeaf( this );
    }
    
    
    public DxDiskHashNodeBranch newNodeBranch() {
        return new DxDiskHashNodeBranch( this );
    }
    
    
    public DxKeyData newKeyData() {
        return new DxKeyData();
    }
    
    
    public boolean isDirtyTable( DxDiskSubTable table ) {
        // in general we don't know if and when the data objects stored in
        // this table have been changed since the last write, so we have to
        // assume it is always dirty
        return true;
    }
    
    
    /**
     * Reuse an existing table from disk. To do so, a previously created table
     * has to be correctly closed. Once a hash map has been re-used, it has to
     * be closed before it can be opened again.
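     *
     * <p>Illustrative sketch of the intended lifecycle (argument values are placeholders):</p>
     * <pre>
     *     DxDiskHashMap map = new DxDiskHashMap( baseFileName, 10, 10, 12 );
     *     map.re_use();   // load the state written by a previously closed map
     *     // ... use the map ...
     *     map.close();    // must be called before the tables can be re-used again
     * </pre>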
     */
    public synchronized void re_use() throws Exception {
        File f = new File( baseFileName + ROOT_TABLE_NAME );
        
        ObjectInputStream in = new ResolvingObjectInputStream( new BufferedInputStream( new FileInputStream( f ) ) );
        try {
            itemCount = in.readInt();
            rootTable.readExternal( in );
            rootTable.grandParent = this;
            buffer.clear();
            cache = new DxKeyData[1 << cacheBits];
            rootTable.fetchTable();
            // update main tableBitSizes array
            if (rootTable.maxDepth() != tableBitSizes.length-1) {
                tableBitSizes = new int[rootTable.maxDepth() + 1];
            }
            tableBitSizes[0] = rootTable.bitSize();
            DxDiskSubTable subTable = rootTable;
            while (!subTable.isLeaf()) {
                DxDiskHashNode[] nodes = subTable.table();
                for (int i = 0; i < nodes.length; i++) {
                    if (nodes[i] instanceof DxDiskHashNodeBranch) {
                        subTable = ((DxDiskHashNodeBranch)nodes[i]).subTable;
                        subTable.fetchTable();
                        tableBitSizes[subTable.depth()] = subTable.bitSize();
                        break;
                    }
                }
            }
            // calculate hashMask bit shift for old tables;
            // it handles all old table configurations
            int mask = rootTable.hashMask();
            int maskBitSize = 0;
            while ((mask & 0x80000000) != 0) {
                maskBitSize++;
                mask <<= 1;
            }
            oldTablesHashMaskShift = rootTable.bitSize() - maskBitSize;
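            // Worked example (illustrative): a hashMask of 0xFF000000 has eight
            // leading one-bits, so maskBitSize becomes 8 and the shift is
            // rootTable.bitSize() - 8.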
        } finally {
            in.close();
        }
    }
    
    
    /**
     * Close this hash map. Write all changed tables to disk. Also store all
     * information needed to re-initialize this object from the disk data.
     */
    public synchronized void close() throws Exception {
        writeAllTables();
        setReusable( true );
    }
    
    
    public void setReusable( boolean flag ) throws IOException {
        File f = new File( baseFileName + ROOT_TABLE_NAME );
        
        if (flag) {
            ObjectOutputStream out = new ObjectOutputStream( new BufferedOutputStream( new FileOutputStream( f ) ) );
            try {
                out.writeInt( itemCount );
                rootTable.writeExternal( out );
            }
            finally {
                out.close();
            }
        }
        else {
            if (f.exists() && !f.delete()) {
                throw new IOException( "Unable to delete file." );
            }
        }
    }
    
    
    public Object clone() {
        throw new RuntimeException( getClass().getName() + ".clone() is not implemented yet." );
    }
    
    
    private final Object cachedElementForKey( Object key, int hashCode ) {
        cacheAccesses++;
        if (cacheMask == 0) {
            return null;
        }
        
        int cacheIndex = hashCode & cacheMask;
        DxKeyData entry = cache[cacheIndex];
        if (entry != null && (entry.key == key || entry.key.equals( key ))) {
            // System.out.print (".");
            cacheHits++;
            return entry.data;
        } else {
            return null;
        }
    }
    
    
    private final void addElementToCache( Object obj, Object key, int hashCode ) {
        if (cacheMask == 0) {
            return;
        }
        
        synchronized (cache) {
            int cacheIndex = hashCode & cacheMask;
            DxKeyData entry = cache[cacheIndex];
            if (entry != null) {
                entry.set( key, obj );
            } else {
                entry = newKeyData();
                entry.set( key, obj );
                cache[cacheIndex] = entry;
            }
        }
    }
    
    
    private final void removeElementFromCache( Object key ) {
        if (cacheMask == 0) {
            return;
        }
        
        synchronized (cache) {
            int cacheIndex = key.hashCode() & cacheMask;
            cache[cacheIndex] = null;
        }
    }
    
    
    public void printStatistics() {
        System.out.println( "DxDiskHashMap statistics:" );
        System.out.println( " sub-table accesses: " + bufferAccesses + " hits: " + bufferHits + " loads: "
        + (bufferAccesses - bufferHits) );
        System.out.println( " cache accesses: " + cacheAccesses + " hits: " + cacheHits );
    }
    
    
    /**
     * A sub-table wants to load its data. If the buffer is full, another
     * table has to be discarded. The "oldest" table is always a leaf,
     * because the access times are always updated during the recursive
     * ascent.
     */
    protected synchronized void readRequest( DxDiskSubTable subTable ) throws Exception {
        if (buffer.count() > maxBufferSize) {
            // find the leaf sub-table with the oldest access time
            long time = Long.MAX_VALUE;
            DxIterator it = buffer.iterator();
            DxDiskSubTable table;
            DxDiskSubTable bestMatch = null;
            while ((table = (DxDiskSubTable)it.next()) != null) {
                
                // check access time but do not deactivate rootTable
                if (table.accessTime < time && table != rootTable) {
                    time = table.accessTime;
                    bestMatch = table;
                }
            }
            // System.out.println (" - " + time);
            
            // check whether there are still sub-tables present
            // for (int i=0; i<DxDiskSubTable.SIZE; i++) {
            //     DxDiskHashNode node = bestMatch.table[i];
            //     if (node != null) {
            //         if (node.element == null && node.subTable.table == null) {
            //             System.out.println ("Node is not a leaf!");
            //             break;
            //         }
            //     }
            // }
            
            // write it, empty it, and remove it from the buffer
            if (isDirtyTable( bestMatch )) {
                bestMatch.writeTable();
            }
            deleteRequest( bestMatch );
        }
        // subTable.touch();
        buffer.add( subTable );
    }
    
    
    /**
     * The specified sub-table was deleted from the tree. So we have
     * to delete it from the table buffer too.
     */
    public synchronized void deleteRequest( DxDiskSubTable subTable ) {
        subTable.empty();
        buffer.remove( subTable );
    }
    
    /**
     * This method is synchronized because sub table filenames have to be unique.
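     * Generated names have the form <code>baseFileName + "." + counter</code>, where the
     * counter starts at the map's creation time in milliseconds and is incremented on each call.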
     */
    public synchronized File newSubTableFile() {
        return new File(baseFileName + "." + String.valueOf( subTableNameCount++ ));
    }
    
    /**
     * Computes a File object which represents the DxDiskSubTable file denoted by the given filename.
     * <div>
     * There are two formats for the given filename:
     * <ul>
     * <li>
     * The long format is a pathname relative to the current working directory of the
     * Java application which wrote the table referring to the DxDiskSubTable in question.
     * It may also be an absolute pathname. This format is error-prone because it does not
     * allow changing the working directory of Java applications or changing the location
     * of the table files within the filesystem. That is why the short format is used now.
     * </li>
     * <li>
     * The short format is only the last pathname component of the pathname of the referred DxDiskSubTable file.
     * The other pathname components (e.g. the directory where the DxDiskSubTable file resides) are determined
     * by the directory where the root table resides. This is possible because all DxDiskSubTable files reside
     * in the same directory as the root table file does.
     * </li>
     * </ul>
     * </div>
     * <div>
     * For compatibility with old tables, the long format is broken up into pathname components
     * and only the last pathname component is then used as an entry in the directory of the root table file.
     * </div>
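     * <div>
     * Illustrative example (hypothetical names): both the long-format name <code>data/store.42</code>
     * and the short-format name <code>store.42</code> resolve to
     * <code>new File(tableDirectory, "store.42")</code>.
     * </div>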
     */
    public File getFileForFilename(String filename) {
        int index = filename.lastIndexOf(File.separatorChar);
        
        if (index!=-1) {
            filename = filename.substring(index+1);
        }
        return new File(tableDirectory,filename);
    }
    
    /**
     * Delete all the files used by this hashtable.
     */
    public void cleanFiles() {
        String baseName = new File( baseFileName ).getName();
        File path = new File( new File( baseFileName ).getParent() );
        String[] fileList = path.list();
        
        for (int i = 0; i < fileList.length; i++) {
            if (fileList[i].startsWith( baseName )) {
                new File( path, fileList[i] ).delete();
            }
        }
    }
    
    
    public synchronized void writeAllTables() throws Exception {
        DxIterator it = buffer.iterator();
        DxDiskSubTable table = null;
        while ((table = (DxDiskSubTable)it.next()) != null) {
            table.writeTable();
        }
    }
    
    
    public synchronized boolean addForKey( Object obj, Object key ) {
        try {
            if (rootTable.addForKey( obj, key )) {
                itemCount++;
                addElementToCache( obj, key, key.hashCode() );
                return true;
            } else {
                return false;
            }
        } catch (Exception e) {
            e.printStackTrace();
            throw new RuntimeException( e.toString() );
        }
    }
    
    
    /**
     * Returns the element for the specified key.<p>
     *
     * Note: This method is synchronized because the cache of sub-tables may
     * change.
     */
    public synchronized Object elementForKey( Object key ) {
        try {
            int hashCode = key.hashCode();
            Object cacheEntry = cachedElementForKey( key, hashCode );
            if (cacheEntry != null) {
                return cacheEntry;
            } else {
                Object answer = rootTable.elementForKey( key, hashCode );
                addElementToCache( answer, key, hashCode );
                return answer;
            }
        } catch (Exception e) {
            e.printStackTrace();
            throw new RuntimeException( e.toString() );
        }
    }
    
    
    protected synchronized void elementDone( DxDiskHashCompatible obj ) {
        rootTable.elementDone( obj );
    }
    
    
    public Object keyForElement( Object obj ) {
        throw new RuntimeException( "keyForElement() not implemented." );
    }
    
    
    public synchronized boolean remove( Object obj ) {
        throw new RuntimeException( "remove() not implemented." );
    }
    
    
    public synchronized Object removeForKey( Object key ) {
        try {
            Object obj = rootTable.removeForKey( key );
            if (obj != null) {
                itemCount--;
                removeElementFromCache( key );
            }
            return obj;
        } catch (Exception e) {
            e.printStackTrace();
            throw new RuntimeException( e.toString() );
        }
    }
    
    
    public DxIterator iterator() {
        return new DxDiskHashIterator( this );
    }
    
    
    public int count() {
        return itemCount;
    }
    
    
    public boolean isEmpty() {
        return itemCount == 0;
    }
    
    
    public boolean containsKey( Object key ) {
        return elementForKey( key ) != null;
    }
    
    
    public int levelTableBitSize(int depth) {
        return tableBitSizes[depth];
    }
    
    
    public int maxDepth() {
        return tableBitSizes.length - 1;
    }
    
    
    public int oldTablesHashMaskShift() {
        return oldTablesHashMaskShift;
    }
    
}