KickJava   Java API By Example, From Geeks To Geeks.

Java > Open Source Codes > org > ozoneDB > DxLib > DxDiskSubTable


// You can redistribute this software and/or modify it under the terms of
// the Ozone Library License version 1 published by ozone-db.org.
//
// The original code and portions created by SMB are
// Copyright (C) 1997-@year@ by SMB GmbH. All rights reserved.
//
// $Id: DxDiskSubTable.java,v 1.14 2004/03/03 19:06:47 wieslawf Exp $
package org.ozoneDB.DxLib;
9
10 import org.ozoneDB.io.stream.ResolvingObjectInputStream;
11 import java.io.*;
12 import java.util.zip.*;
13
14 /**
15  * @author <a HREF="http://www.softwarebuero.de/">SMB</a>
16  * @author <a HREF="http://www.medium.net/">Medium.net</a>
17  */

18 public final class DxDiskSubTable implements Externalizable {
19     
20     final static long serialVersionUID = 2;
21     
22     public static int timeCount = (int)System.currentTimeMillis();
23     
24     private int size;
25     private int bitSize;
26     private int maxDepth;
27     
28     //protected String fileName;
29
private File file;
30     private int depth;
31     private int hashMask;
32     private int hashMaskShift;
33     
34     // todo: ckeck if changing to private with an accessor
35
// provides acceptable performance
36
protected DxDiskHashMap grandParent;
37     
38     private DxDiskHashNode[] table;
39     
40     // todo: ckeck if changing to private with an accessor
41
// provides acceptable performance
42
protected long accessTime = timeCount++;
43     private boolean dirty;
44     private int subTableCount = 0;
45
46     /**
47      * All the items (including subTables) in the local list.
48      */

49     protected int itemCount = 0;
50     
51     
52     
53     public DxDiskSubTable(DxDiskHashMap grandParent) {
54         this.grandParent = grandParent;
55     }
56     
57     
58     public DxDiskSubTable( DxDiskHashMap _grandParent, int _depth ) {
59         grandParent = _grandParent;
60         depth = _depth;
61         bitSize = grandParent.levelTableBitSize( depth );
62         maxDepth = grandParent.maxDepth();
63         hashMaskShift = 0;
64         int i = depth;
65         while (i < maxDepth) {
66             hashMaskShift += grandParent.levelTableBitSize( ++i );
67         }
68         int bitMask = 1 << hashMaskShift;
69         hashMask = 0;
70         for (i = 0; i < bitSize; i++) {
71             hashMask = hashMask | bitMask;
72             bitMask <<= 1;
73         }
74         size = 1 << bitSize;
75         table = new DxDiskHashNode[size];
76         file = grandParent.newSubTableFile();
77     }
78     
79 /*
80     public String filename() {
81         return fileName;
82     }
83  */

84     
85     public File getFile() {
86         return file;
87     }
88     
89     public DxDiskHashNode[] table() {
90         return table;
91     }
92     
93     
94     public DxDiskHashNode[] fetchedTable() throws Exception JavaDoc {
95         fetchTable();
96         // touch();
97
return table;
98     }
99     
100     
101     public void empty() {
102         table = null;
103     }
104     
105     
106     public int count() {
107         return itemCount;
108     }
109     
110     
111     public int maxDepth() {
112         return maxDepth;
113     }
114     
115     
116     public int depth() {
117         return depth;
118     }
119     
120     
121     public int bitSize() {
122         return bitSize;
123     }
124     
125     
126     public int hashMask() {
127         return hashMask;
128     }
129     
130     
131     public void deleteFile() {
132         //System.out.println ("deleteFile()...");
133
//File file = new File( fileName );
134
if (file.exists()) {
135             file.delete();
136         }
137     }
138     
139     
140     public int hashKey( int key ) {
141         return (key & hashMask) >>> hashMaskShift;
142     }
143     
144     
145     protected void fetchTable() throws Exception JavaDoc {
146         grandParent.bufferAccesses++;
147         if (table == null) {
148             synchronized (this) {
149                 grandParent.readRequest( this );
150                 readTable();
151             }
152         } else {
153             grandParent.bufferHits++;
154         }
155     }
156     
157     
158     protected synchronized void touch() {
159         accessTime = timeCount++;
160     }
161     
162     
163     public boolean isLeaf() {
164         return subTableCount == 0;
165     }
166     
167     
168     public boolean isDirty() {
169         return dirty;
170     }
171     
172     
173     public synchronized boolean addForKey( Object JavaDoc obj, Object JavaDoc key ) throws Exception JavaDoc {
174         fetchTable();
175         
176         boolean answer = true;
177         int localKey = hashKey( key.hashCode() );
178         DxDiskHashNode node = table[localKey];
179         if (node != null) {
180             //node ist ein blatt
181
if (node instanceof DxDiskHashNodeLeaf) {
182                 DxDiskHashNodeLeaf oldNode = (DxDiskHashNodeLeaf)node;
183                 if (depth < maxDepth) {
184                     DxDiskHashNodeBranch newNode = grandParent.newNodeBranch();
185                     newNode.subTable = new DxDiskSubTable( grandParent, depth + 1 );
186                     //im alten node kann nur ein element stehen
187
newNode.subTable.addForKey( oldNode.element.data, oldNode.element.key );
188                     answer = newNode.subTable.addForKey( obj, key );
189                     if (answer) {
190                         grandParent.readRequest( newNode.subTable );
191                         
192                         // HACK: the readRequest() call did in one rare case set
193
// the table member variable to null. Maybe there is a
194
// cleaner solution than simply re-reading the table,
195
// like for instance preventing the table becoming
196
// null in the first place
197
fetchTable();
198                         
199                         table[localKey] = newNode;
200                         subTableCount++;
201                     } else {
202                         table[localKey] = oldNode;
203                     }
204                 } else {
205                     //maximale tiefe ist erreicht
206
answer = oldNode.addForKey( obj, key );
207                 }
208             } else {
209                 //node ist ein branch
210
((DxDiskHashNodeBranch)node).subTable.addForKey( obj, key );
211             }
212         } else {
213             //node existiert noch nicht
214
DxDiskHashNodeLeaf newNode = grandParent.newNodeLeaf();
215             newNode.addForKey( obj, key );
216             table[localKey] = newNode;
217             itemCount++;
218         }
219         //muss ganz unten stehen, damit abraeumen korrekt ist
220
touch();
221         dirty = true;
222         return answer;
223     }
224     
225     
226     public final Object JavaDoc elementForKey( Object JavaDoc key, int hashCode ) throws Exception JavaDoc {
227         fetchTable();
228         
229         int localKey = hashKey( hashCode );
230         Object JavaDoc answer = null;
231         DxDiskHashNode node = table[localKey];
232         if (node != null) {
233             if (node instanceof DxDiskHashNodeLeaf) {
234                 answer = ((DxDiskHashNodeLeaf)node).elementForKey( key, hashCode );
235             } else {
236                 answer = ((DxDiskHashNodeBranch)node).subTable.elementForKey( key, hashCode );
237             }
238         }
239         //muss ganz unten stehen, damit anraeumen korrekt ist
240
touch();
241         return answer;
242     }
243     
244     
245     protected synchronized void elementDone( DxDiskHashCompatible obj ) {
246     }
247     
248     
249     public synchronized Object JavaDoc removeForKey( Object JavaDoc key ) throws Exception JavaDoc {
250         fetchTable();
251         
252         Object JavaDoc answer = null;
253         int localKey = hashKey( key.hashCode() );
254         DxDiskHashNode node = table[localKey];
255         if (node != null) {
256             if (node instanceof DxDiskHashNodeLeaf) {
257                 answer = ((DxDiskHashNodeLeaf)node).removeForKey( key );
258                 if (((DxDiskHashNodeLeaf)node).element == null) {
259                     table[localKey] = null;
260                     itemCount--;
261                 }
262             } else {
263                 DxDiskHashNodeBranch oldNode = (DxDiskHashNodeBranch)node;
264                 answer = oldNode.subTable.removeForKey( key );
265                 if (oldNode.subTable.itemCount == 0) {
266                     grandParent.deleteRequest( oldNode.subTable );
267                     oldNode.subTable.deleteFile();
268                     table[localKey] = null;
269                     itemCount--;
270                     subTableCount--;
271                 }
272             }
273         }
274         // System.out.println ("remove: key: " + key + " - depth: " + depth + " - count: " + itemCount);
275

276         //muss ganz unten stehen, damit anraeumen korrekt ist
277
touch();
278         dirty = true;
279         return answer;
280     }
281     
282     
283     /**
284      * Schreibt nur die representation in einem HashNode aber
285      * nicht die tabelle selber.
286      */

287     public void writeExternal( ObjectOutput out ) throws IOException {
288         out.writeUTF( file.getName() );
289         out.writeInt( bitSize );
290         out.writeInt( size );
291         out.writeInt( maxDepth );
292         out.writeInt( depth );
293         // the tables are written in old file format
294
// it handles all old table configurations
295
// if we throw it away the tables will be converted automatically
296
int shift = grandParent.oldTablesHashMaskShift();
297         if (shift > 0 && depth != maxDepth) {
298             out.writeInt( hashMask << shift );
299         } else {
300             out.writeInt( hashMask );
301         }
302         out.writeInt( subTableCount );
303         out.writeInt( itemCount );
304     }
305     
306     
307     public void readExternal( ObjectInput in ) throws IOException, ClassNotFoundException JavaDoc {
308         String JavaDoc fileName = in.readUTF();
309         bitSize = in.readInt();
310         size = in.readInt();
311         maxDepth = in.readInt();
312         depth = in.readInt();
313         hashMask = in.readInt();
314         subTableCount = in.readInt();
315         itemCount = in.readInt();
316         // we have to modify the incorrect hashMask
317
// to backward compatibility with old ozone idtable files
318
// it handles all old table configurations
319
int shift = grandParent.oldTablesHashMaskShift();
320         if (shift > 0 && depth != maxDepth) {
321             hashMask >>>= shift;
322         }
323         file = grandParent.getFileForFilename(fileName);
324         // force tabel to be read on next access
325
if (itemCount > 0) {
326             table = null;
327         }
328         // for speed up and simplify a local hashKey() function
329
hashMaskShift = 0;
330         if (depth != maxDepth) {
331             int mask = hashMask;
332             while ((mask & 1) == 0) {
333                 mask >>>= 1;
334                 hashMaskShift++;
335             }
336         }
337         accessTime = 0;
338         dirty = false;
339     }
340     
341     
342     /**
343      * Schreibt den inhalt der ganzen tabelle aber nicht die
344      * sub-tabellen. Der name wird aus dem baseFileName und
345      */

346     public void writeTable() throws IOException {
347         // System.out.println ("schreiben: " + fileName);
348
// ObjectOutputStream out = new ObjectOutputStream (new BufferedOutputStream (new FileOutputStream (fileName), 4*1024));
349

350         OutputStream out = new FileOutputStream( file );
351         out = new GZIPOutputStream( out );
352         out = new BufferedOutputStream( out, 4096 );
353         ObjectOutputStream oout = new ObjectOutputStream( out );
354         try {
355             synchronized (table) {
356                 oout.writeInt( itemCount );
357                 for (int i = 0; i < size; i++) {
358                     if (table[i] != null) {
359                         oout.writeShort( i );
360                         oout.writeByte( table[i] instanceof DxDiskHashNodeLeaf ? 1 : 2 );
361                         table[i].writeExternal( oout );
362                     }
363                 }
364             }
365             dirty = false;
366         }
367         finally {
368             oout.close();
369         }
370     }
371     
372     
373     public synchronized void readTable() throws IOException, ClassNotFoundException JavaDoc {
374         // System.out.println ("lesen: " + fileName);
375
table = new DxDiskHashNode[size];
376         
377         // ObjectInputStream in = new ObjectInputStream (new BufferedInputStream (new FileInputStream (fileName), 4*1024));
378
InputStream in = new FileInputStream( file );
379         in = new GZIPInputStream( in );
380         in = new BufferedInputStream( in, 4096 );
381         ObjectInputStream oin = new ResolvingObjectInputStream( in );
382         
383         try {
384             int count = oin.readInt();
385             for (int i = 0; i < count; i++) {
386                 int index = oin.readShort();
387                 if (index < 0) {
388                     // correct for 2s complement negative
389
index += (int) Short.MAX_VALUE - (int) Short.MIN_VALUE + 1;
390                 }
391
392                 byte nodeType = oin.readByte();
393                 if (nodeType == 1) {
394                     table[index] = grandParent.newNodeLeaf();
395                 } else {
396                     table[index] = grandParent.newNodeBranch();
397                 }
398                 table[index].readExternal( oin );
399                 if (nodeType == 2) {
400                     ((DxDiskHashNodeBranch)table[index]).subTable.grandParent = grandParent;
401                 }
402             }
403             touch();
404             dirty = false;
405         }
406         finally {
407             oin.close();
408         }
409     }
410     
411 }
412
Popular Tags