KickJava   Java API By Example, From Geeks To Geeks.

Java > Open Source Codes > org > apache > derby > iapi > store > access > BackingStoreHashtable


/*

   Derby - Class org.apache.derby.iapi.store.access.BackingStoreHashtable

   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements. See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to you under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License. You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */

21
package org.apache.derby.iapi.store.access;

import java.util.Enumeration;
import java.util.Hashtable;
import java.util.NoSuchElementException;
import java.util.Properties;
import java.util.Vector;

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.services.cache.ClassSize;
import org.apache.derby.iapi.services.io.Storable;
import org.apache.derby.iapi.services.sanity.SanityManager;
import org.apache.derby.iapi.types.CloneableObject;
import org.apache.derby.iapi.types.DataValueDescriptor;
40
/**
A BackingStoreHashtable is a utility class which will store a set of rows into
an in memory hash table, or overflow the hash table to a temporary on disk
structure.
<p>
All rows must contain the same number of columns, and the column at position
N of all the rows must have the same format id. If the BackingStoreHashtable
needs to be overflowed to disk, then an arbitrary row will be chosen and used
as a template for creating the underlying overflow container.
<p>
The hash table will be built logically as follows (actual implementation
may differ). The important points are that the hash value is the standard
java hash value on row[key_column_numbers[0]], if key_column_numbers.length
is 1, or row[key_column_numbers[0, 1, ...]] if key_column_numbers.length > 1,
and that duplicate detection is done by the standard java duplicate detection
provided by java.util.Hashtable.
<p>
<pre>
import java.util.Hashtable;

hash_table = new Hashtable();

Object[] row;
boolean needsToClone = rowSource.needsToClone();

while((row = rowSource.getNextRowFromRowSource()) != null)
{
    if (needsToClone)
        row = clone_row_from_row(row);

    Object key = KeyHasher.buildHashKey(row, key_column_numbers);

    if ((duplicate_value = hash_table.put(key, row)) != null)
    {
        Vector row_vec;

        // inserted a duplicate
        if ((duplicate_value instanceof Vector))
        {
            row_vec = (Vector) duplicate_value;
        }
        else
        {
            // allocate vector to hold duplicates
            row_vec = new Vector(2);

            // insert original row into vector
            row_vec.addElement(duplicate_value);

            // put the vector as the data rather than the row
            hash_table.put(key, row_vec);
        }

        // insert new row into vector
        row_vec.addElement(row);
    }
}
</pre>
**/

101
102 public class BackingStoreHashtable
103 {
104
105     /**************************************************************************
106      * Fields of the class
107      **************************************************************************
108      */

109     private TransactionController tc;
110     private Hashtable JavaDoc hash_table;
111     private int[] key_column_numbers;
112     private boolean remove_duplicates;
113     private boolean skipNullKeyColumns;
114     private Properties JavaDoc auxillary_runtimestats;
115     private RowSource row_source;
116     /* If max_inmemory_rowcnt > 0 then use that to decide when to spill to disk.
117      * Otherwise compute max_inmemory_size based on the JVM memory size when the BackingStoreHashtable
118      * is constructed and use that to decide when to spill to disk.
119      */

120     private long max_inmemory_rowcnt;
121     private long inmemory_rowcnt;
122     private long max_inmemory_size;
123     private boolean keepAfterCommit;
124
125     /**
126      * The estimated number of bytes used by Vector(0)
127      */

128     private final static int vectorSize = ClassSize.estimateBaseFromCatalog(java.util.Vector JavaDoc.class);
129     
130     private DiskHashtable diskHashtable;
131
132     /**************************************************************************
133      * Constructors for This class:
134      **************************************************************************
135      */

136     private BackingStoreHashtable(){}
137
138     /**
139      * Create the BackingStoreHashtable from a row source.
140      * <p>
141      * This routine drains the RowSource. The performance characteristics
142      * depends on the number of rows inserted and the parameters to the
143      * constructor.
144      * <p>
145      * If the number of rows is <= "max_inmemory_rowcnt", then the rows are
146      * inserted into a java.util.Hashtable. In this case no
147      * TransactionController is necessary, a "null" tc is valid.
148      * <p>
149      * If the number of rows is > "max_inmemory_rowcnt", then the rows will
150      * be all placed in some sort of Access temporary file on disk. This
151      * case requires a valid TransactionController.
152      *
153      * @param tc An open TransactionController to be used if the
154      * hash table needs to overflow to disk.
155      *
156      * @param row_source RowSource to read rows from.
157      *
158      * @param key_column_numbers The column numbers of the columns in the
159      * scan result row to be the key to the Hashtable.
160      * "0" is the first column in the scan result
161      * row (which may be different than the first
162      * row in the table of the scan).
163      *
164      * @param remove_duplicates Should the Hashtable automatically remove
165      * duplicates, or should it create the Vector of
166      * duplicates?
167      *
168      * @param estimated_rowcnt The estimated number of rows in the hash table.
169      * Pass in -1 if there is no estimate.
170      *
171      * @param max_inmemory_rowcnt
172      * The maximum number of rows to insert into the
173      * inmemory Hash table before overflowing to disk.
174      * Pass in -1 if there is no maximum.
175      *
176      * @param initialCapacity If not "-1" used to initialize the java
177      * Hashtable.
178      *
179      * @param loadFactor If not "-1" used to initialize the java
180      * Hashtable.
181      *
182      * @param skipNullKeyColumns Skip rows with a null key column, if true.
183      *
184      * @param keepAfterCommit If true the hash table is kept after a commit,
185      * if false the hash table is dropped on the next commit.
186      *
187      *
188      * @exception StandardException Standard exception policy.
189      **/

190     public BackingStoreHashtable(
191     TransactionController tc,
192     RowSource row_source,
193     int[] key_column_numbers,
194     boolean remove_duplicates,
195     long estimated_rowcnt,
196     long max_inmemory_rowcnt,
197     int initialCapacity,
198     float loadFactor,
199     boolean skipNullKeyColumns,
200     boolean keepAfterCommit)
201         throws StandardException
202     {
203         this.key_column_numbers = key_column_numbers;
204         this.remove_duplicates = remove_duplicates;
205         this.row_source = row_source;
206         this.skipNullKeyColumns = skipNullKeyColumns;
207         this.max_inmemory_rowcnt = max_inmemory_rowcnt;
208         if( max_inmemory_rowcnt > 0)
209             max_inmemory_size = Long.MAX_VALUE;
210         else
211             max_inmemory_size = Runtime.getRuntime().totalMemory()/100;
212         this.tc = tc;
213         this.keepAfterCommit = keepAfterCommit;
214
215         Object JavaDoc[] row;
216
217         // use passed in capacity and loadfactor if not -1, you must specify
218
// capacity if you want to specify loadfactor.
219
if (initialCapacity != -1)
220         {
221             hash_table =
222                 ((loadFactor == -1) ?
223                      new Hashtable JavaDoc(initialCapacity) :
224                      new Hashtable JavaDoc(initialCapacity, loadFactor));
225         }
226         else
227         {
228             /* We want to create the hash table based on the estimated row
229              * count if a) we have an estimated row count (i.e. it's greater
230              * than zero) and b) we think we can create a hash table to
231              * hold the estimated row count without running out of memory.
232              * The check for "b" is required because, for deeply nested
233              * queries and/or queries with a high number of tables in
234              * their FROM lists, the optimizer can end up calculating
235              * some very high row count estimates--even up to the point of
236              * Double.POSITIVE_INFINITY (see DERBY-1259 for an explanation
237              * of how that can happen). In that case any attempts to
238              * create a Hashtable of size estimated_rowcnt can cause
239              * OutOfMemory errors when we try to create the Hashtable.
240              * So as a "red flag" for that kind of situation, we check to
241              * see if the estimated row count is greater than the max
242              * in-memory size for this table. Unit-wise this comparison
243              * is relatively meaningless: rows vs bytes. But if our
244              * estimated row count is greater than the max number of
245              * in-memory bytes that we're allowed to consume, then
246              * it's very likely that creating a Hashtable with a capacity
247              * of estimated_rowcnt will lead to memory problems. So in
248              * that particular case we leave hash_table null here and
249              * initialize it further below, using the estimated in-memory
250              * size of the first row to figure out what a reasonable size
251              * for the Hashtable might be.
252              */

253             hash_table =
254                 (((estimated_rowcnt <= 0) || (row_source == null)) ?
255                      new Hashtable JavaDoc() :
256                      (estimated_rowcnt < max_inmemory_size) ?
257                          new Hashtable JavaDoc((int) estimated_rowcnt) :
258                          null);
259         }
260
261         if (row_source != null)
262         {
263             boolean needsToClone = row_source.needsToClone();
264
265             while ((row = getNextRowFromRowSource()) != null)
266             {
267                 // If we haven't initialized the hash_table yet then that's
268
// because a Hashtable with capacity estimated_rowcnt would
269
// probably cause memory problems. So look at the first row
270
// that we found and use that to create the hash table with
271
// an initial capacity such that, if it was completely full,
272
// it would still satisfy the max_inmemory condition. Note
273
// that this isn't a hard limit--the hash table can grow if
274
// needed.
275
if (hash_table == null)
276                 {
277                     // Check to see how much memory we think the first row
278
// is going to take, and then use that to set the initial
279
// capacity of the Hashtable.
280
double rowUsage = getEstimatedMemUsage(row);
281                     hash_table = new Hashtable JavaDoc((int)(max_inmemory_size / rowUsage));
282                 }
283                
284                 add_row_to_hash_table(hash_table, row, needsToClone);
285             }
286         }
287
288         // In the (unlikely) event that we received a "red flag" estimated_rowcnt
289
// that is too big (see comments above), it's possible that, if row_source
290
// was null or else didn't have any rows, hash_table could still be null
291
// at this point. So we initialize it to an empty hashtable (representing
292
// an empty result set) so that calls to other methods on this
293
// BackingStoreHashtable (ex. "size()") will have a working hash_table
294
// on which to operate.
295
if (hash_table == null)
296             hash_table = new Hashtable JavaDoc();
297     }
298
299     /**************************************************************************
300      * Private/Protected methods of This class:
301      **************************************************************************
302      */

303
304     /**
305      * Call method to either get next row or next row with non-null
306      * key columns.
307      *
308      *
309      * @exception StandardException Standard exception policy.
310      */

311     private Object JavaDoc[] getNextRowFromRowSource()
312         throws StandardException
313     {
314         Object JavaDoc[] row = row_source.getNextRowFromRowSource();
315
316         if (skipNullKeyColumns)
317         {
318             while (row != null)
319             {
320                 // Are any key columns null?
321
int index = 0;
322                 for ( ; index < key_column_numbers.length; index++)
323                 {
324                     if (SanityManager.DEBUG)
325                     {
326                         if (! (row[key_column_numbers[index]] instanceof Storable))
327                         {
328                             SanityManager.THROWASSERT(
329                                 "row[key_column_numbers[index]] expected to be Storable, not " +
330                                 row[key_column_numbers[index]].getClass().getName());
331                         }
332                     }
333                     Storable storable = (Storable) row[key_column_numbers[index]];
334                     if (storable.isNull())
335                     {
336                         break;
337                     }
338                 }
339                 // No null key columns
340
if (index == key_column_numbers.length)
341                 {
342                     return row;
343                 }
344                 // 1 or more null key columns
345
row = row_source.getNextRowFromRowSource();
346             }
347         }
348         return row;
349     }
350
351     /**
352      * Return a cloned copy of the row.
353      *
354      * @return The cloned row row to use.
355      *
356      * @exception StandardException Standard exception policy.
357      **/

358     static Object JavaDoc[] cloneRow(Object JavaDoc[] old_row)
359         throws StandardException
360     {
361         Object JavaDoc[] new_row = new DataValueDescriptor[old_row.length];
362
363         // the only difference between getClone and cloneObject is cloneObject does
364
// not objectify a stream. We use getClone here. Beetle 4896.
365
for (int i = 0; i < old_row.length; i++)
366         {
367             if( old_row[i] != null)
368                 new_row[i] = ((DataValueDescriptor) old_row[i]).getClone();
369         }
370
371         return(new_row);
372     }
373
374     /**
375      * Return a shallow cloned row
376      *
377      * @return The cloned row row to use.
378      *
379      * @exception StandardException Standard exception policy.
380      **/

381     static DataValueDescriptor[] shallowCloneRow(DataValueDescriptor[] old_row)
382         throws StandardException
383     {
384         DataValueDescriptor[] new_row = new DataValueDescriptor[old_row.length];
385         // the only difference between getClone and cloneObject is cloneObject does
386
// not objectify a stream. We use cloneObject here. DERBY-802
387
for (int i = 0; i < old_row.length; i++)
388         {
389             if( old_row[i] != null)
390                 new_row[i] = (DataValueDescriptor)
391                     ((CloneableObject) old_row[i]).cloneObject();
392         }
393
394         return(new_row);
395     }
396
397     /**
398      * Do the work to add one row to the hash table.
399      * <p>
400      *
401      * @param row Row to add to the hash table.
402      * @param hash_table The java HashTable to load into.
403      * @param needsToClone If the row needs to be cloned
404      *
405      * @exception StandardException Standard exception policy.
406      **/

407     private void add_row_to_hash_table(
408     Hashtable JavaDoc hash_table,
409     Object JavaDoc[] row,
410     boolean needsToClone )
411         throws StandardException
412     {
413         if( spillToDisk( hash_table, row))
414             return;
415         
416         if (needsToClone)
417         {
418             row = cloneRow(row);
419         }
420         Object JavaDoc key = KeyHasher.buildHashKey(row, key_column_numbers);
421         Object JavaDoc duplicate_value = null;
422
423         if ((duplicate_value = hash_table.put(key, row)) == null)
424             doSpaceAccounting( row, false);
425         else
426         {
427             if (!remove_duplicates)
428             {
429                 Vector JavaDoc row_vec;
430
431                 // inserted a duplicate
432
if ((duplicate_value instanceof Vector JavaDoc))
433                 {
434                     doSpaceAccounting( row, false);
435                     row_vec = (Vector JavaDoc) duplicate_value;
436                 }
437                 else
438                 {
439                     // allocate vector to hold duplicates
440
row_vec = new Vector JavaDoc(2);
441
442                     // insert original row into vector
443
row_vec.addElement(duplicate_value);
444                     doSpaceAccounting( row, true);
445                 }
446
447                 // insert new row into vector
448
row_vec.addElement(row);
449
450                 // store vector of rows back into hash table,
451
// overwriting the duplicate key that was
452
// inserted.
453
hash_table.put(key, row_vec);
454             }
455         }
456
457         row = null;
458     }
459
460     private void doSpaceAccounting( Object JavaDoc[] row,
461                                     boolean firstDuplicate)
462     {
463         inmemory_rowcnt++;
464         if( max_inmemory_rowcnt <= 0)
465         {
466             max_inmemory_size -= getEstimatedMemUsage(row);
467             if( firstDuplicate)
468                 max_inmemory_size -= vectorSize;
469         }
470     } // end of doSpaceAccounting
471

472     /**
473      * Determine whether a new row should be spilled to disk and, if so, do it.
474      *
475      * @param hash_table The in-memory hash table
476      * @param row
477      *
478      * @return true if the row was spilled to disk, false if not
479      *
480      * @exception StandardException Standard exception policy.
481      */

482     private boolean spillToDisk( Hashtable JavaDoc hash_table,
483                                  Object JavaDoc[] row)
484         throws StandardException
485     {
486         // Once we have started spilling all new rows will go to disk, even if we have freed up some
487
// memory by moving duplicates to disk. This simplifies handling of duplicates and accounting.
488
if( diskHashtable == null)
489         {
490             if( max_inmemory_rowcnt > 0)
491             {
492                 if( inmemory_rowcnt < max_inmemory_rowcnt)
493                     return false; // Do not spill
494
}
495             else if( max_inmemory_size > getEstimatedMemUsage(row))
496                 
497                 return false;
498             // Want to start spilling
499
if( ! (row instanceof DataValueDescriptor[]))
500             {
501                 if( SanityManager.DEBUG)
502                     SanityManager.THROWASSERT( "BackingStoreHashtable row is not DataValueDescriptor[]");
503                 // Do not know how to put it on disk
504
return false;
505             }
506             diskHashtable = new DiskHashtable( tc,
507                                                (DataValueDescriptor[]) row,
508                                                key_column_numbers,
509                                                remove_duplicates,
510                                                keepAfterCommit);
511         }
512         Object JavaDoc key = KeyHasher.buildHashKey(row, key_column_numbers);
513         Object JavaDoc duplicateValue = hash_table.get( key);
514         if( duplicateValue != null)
515         {
516             if( remove_duplicates)
517                 return true; // a degenerate case of spilling
518
// If we are keeping duplicates then move all the duplicates from memory to disk
519
// This simplifies finding duplicates: they are either all in memory or all on disk.
520
if( duplicateValue instanceof Vector JavaDoc)
521             {
522                 Vector JavaDoc duplicateVec = (Vector JavaDoc) duplicateValue;
523                 for( int i = duplicateVec.size() - 1; i >= 0; i--)
524                 {
525                     Object JavaDoc[] dupRow = (Object JavaDoc[]) duplicateVec.elementAt(i);
526                     diskHashtable.put( key, dupRow);
527                 }
528             }
529             else
530                 diskHashtable.put( key, (Object JavaDoc []) duplicateValue);
531             hash_table.remove( key);
532         }
533         diskHashtable.put( key, row);
534         return true;
535     } // end of spillToDisk
536

537     /**
538      * Take a row and return an estimate as to how much memory that
539      * row will consume.
540      *
541      * @param row The row for which we want to know the memory usage.
542      * @return A guess as to how much memory the current row will
543      * use.
544      */

545     private long getEstimatedMemUsage(Object JavaDoc [] row)
546     {
547         long rowMem = 0;
548         for( int i = 0; i < row.length; i++)
549         {
550             if (row[i] instanceof DataValueDescriptor)
551                 rowMem += ((DataValueDescriptor) row[i]).estimateMemoryUsage();
552             rowMem += ClassSize.refSize;
553         }
554
555         rowMem += ClassSize.refSize;
556         return rowMem;
557     }
558
559     /**************************************************************************
560      * Public Methods of This class:
561      **************************************************************************
562      */

563
564     /**
565      * Close the BackingStoreHashtable.
566      * <p>
567      * Perform any necessary cleanup after finishing with the hashtable. Will
568      * deallocate/dereference objects as necessary. If the table has gone
569      * to disk this will drop any on disk files used to support the hash table.
570      * <p>
571      *
572      * @exception StandardException Standard exception policy.
573      **/

574     public void close()
575         throws StandardException
576     {
577         hash_table = null;
578         if( diskHashtable != null)
579         {
580             diskHashtable.close();
581             diskHashtable = null;
582         }
583         return;
584     }
585
586     /**
587      * Return an Enumeration that can be used to scan entire table.
588      * <p>
589      * RESOLVE - is it worth it to support this routine when we have a
590      * disk overflow hash table?
591      *
592      * @return The Enumeration.
593      *
594      * @exception StandardException Standard exception policy.
595      **/

596     public Enumeration JavaDoc elements()
597         throws StandardException
598     {
599         if( diskHashtable == null)
600             return(hash_table.elements());
601         return new BackingStoreHashtableEnumeration();
602     }
603
604     /**
605      * get data associated with given key.
606      * <p>
607      * There are 2 different types of objects returned from this routine.
608      * <p>
609      * In both cases, the key value is either the object stored in
610      * row[key_column_numbers[0]], if key_column_numbers.length is 1,
611      * otherwise it is a KeyHasher containing
612      * the objects stored in row[key_column_numbers[0, 1, ...]].
613      * For every qualifying unique row value an entry is placed into the
614      * Hashtable.
615      * <p>
616      * For row values with duplicates, the value of the data is a Vector of
617      * rows.
618      * <p>
619      * The caller will have to call "instanceof" on the data value
620      * object if duplicates are expected, to determine if the data value
621      * of the Hashtable entry is a row or is a Vector of rows.
622      * <p>
623      * The BackingStoreHashtable "owns" the objects returned from the get()
624      * routine. They remain valid until the next access to the
625      * BackingStoreHashtable. If the client needs to keep references to these
626      * objects, it should clone copies of the objects. A valid
627      * BackingStoreHashtable can place all rows into a disk based conglomerate,
628      * declare a row buffer and then reuse that row buffer for every get()
629      * call.
630      *
631      * @return The value to which the key is mapped in this hashtable;
632      * null if the key is not mapped to any value in this hashtable.
633      *
634      * @param key The key to hash on.
635      *
636      * @exception StandardException Standard exception policy.
637      **/

638     public Object JavaDoc get(Object JavaDoc key)
639         throws StandardException
640     {
641         Object JavaDoc obj = hash_table.get(key);
642         if( diskHashtable == null || obj != null)
643             return obj;
644         return diskHashtable.get( key);
645     }
646
647     /**
648      * Return runtime stats to caller by adding them to prop.
649      * <p>
650      *
651      * @param prop The set of properties to append to.
652      *
653      * @exception StandardException Standard exception policy.
654      **/

655     public void getAllRuntimeStats(Properties JavaDoc prop)
656         throws StandardException
657     {
658         if (auxillary_runtimestats != null)
659             org.apache.derby.iapi.util.PropertyUtil.copyProperties(auxillary_runtimestats, prop);
660     }
661
662     /**
663      * remove a row from the hash table.
664      * <p>
665      * a remove of a duplicate removes the entire duplicate list.
666      *
667      * @param key The key of the row to remove.
668      *
669      * @exception StandardException Standard exception policy.
670      **/

671     public Object JavaDoc remove(
672     Object JavaDoc key)
673         throws StandardException
674     {
675         Object JavaDoc obj = hash_table.remove(key);
676         if( obj != null || diskHashtable == null)
677             return obj;
678         return diskHashtable.remove(key);
679     }
680
681     /**
682      * Set the auxillary runtime stats.
683      * <p>
684      * getRuntimeStats() will return both the auxillary stats and any
685      * BackingStoreHashtable() specific stats. Note that each call to
686      * setAuxillaryRuntimeStats() overwrites the Property set that was
687      * set previously.
688      *
689      * @param prop The set of properties to append from.
690      *
691      * @exception StandardException Standard exception policy.
692      **/

693     public void setAuxillaryRuntimeStats(Properties JavaDoc prop)
694         throws StandardException
695     {
696         auxillary_runtimestats = prop;
697     }
698
699     /**
700      * Put a row into the hash table.
701      * <p>
702      * The in memory hash table will need to keep a reference to the row
703      * after the put call has returned. If "needsToClone" is true then the
704      * hash table will make a copy of the row and put that, else if
705      * "needsToClone" is false then the hash table will keep a reference to
706      * the row passed in and no copy will be made.
707      * <p>
708      * If rouine returns false, then no reference is kept to the duplicate
709      * row which was rejected (thus allowing caller to reuse the object).
710      *
711      * @param needsToClone does this routine have to make a copy of the row,
712      * in order to keep a reference to it after return?
713      * @param row The row to insert into the table.
714      *
715      * @return true if row was inserted into the hash table. Returns
716      * false if the BackingStoreHashtable is eliminating
717      * duplicates, and the row being inserted is a duplicate,
718      * or if we are skipping rows with 1 or more null key columns
719      * and we find a null key column.
720      *
721      * @exception StandardException Standard exception policy.
722      **/

723     public boolean put(
724     boolean needsToClone,
725     Object JavaDoc[] row)
726         throws StandardException
727     {
728         // Are any key columns null?
729
if (skipNullKeyColumns)
730         {
731             int index = 0;
732             for ( ; index < key_column_numbers.length; index++)
733             {
734                 if (SanityManager.DEBUG)
735                 {
736                     if (! (row[key_column_numbers[index]] instanceof Storable))
737                     {
738                         SanityManager.THROWASSERT(
739                             "row[key_column_numbers[index]] expected to be Storable, not " +
740                             row[key_column_numbers[index]].getClass().getName());
741                     }
742                 }
743                 Storable storable = (Storable) row[key_column_numbers[index]];
744                 if (storable.isNull())
745                 {
746                     return false;
747                 }
748             }
749         }
750
751         Object JavaDoc key = KeyHasher.buildHashKey(row, key_column_numbers);
752
753         if ((remove_duplicates) && (get(key) != null))
754         {
755             return(false);
756         }
757         else
758         {
759             add_row_to_hash_table(hash_table, row, needsToClone);
760             return(true);
761         }
762     }
763
764     /**
765      * Return number of unique rows in the hash table.
766      * <p>
767      *
768      * @return The number of unique rows in the hash table.
769      *
770      * @exception StandardException Standard exception policy.
771      **/

772     public int size()
773         throws StandardException
774     {
775         if( diskHashtable == null)
776             return(hash_table.size());
777         return hash_table.size() + diskHashtable.size();
778     }
779
780     private class BackingStoreHashtableEnumeration implements Enumeration JavaDoc
781     {
782         private Enumeration JavaDoc memoryEnumeration;
783         private Enumeration JavaDoc diskEnumeration;
784
785         BackingStoreHashtableEnumeration()
786         {
787             memoryEnumeration = hash_table.elements();
788             if( diskHashtable != null)
789             {
790                 try
791                 {
792                     diskEnumeration = diskHashtable.elements();
793                 }
794                 catch( StandardException se)
795                 {
796                     diskEnumeration = null;
797                 }
798             }
799         }
800         
801         public boolean hasMoreElements()
802         {
803             if( memoryEnumeration != null)
804             {
805                 if( memoryEnumeration.hasMoreElements())
806                     return true;
807                 memoryEnumeration = null;
808             }
809             if( diskEnumeration == null)
810                 return false;
811             return diskEnumeration.hasMoreElements();
812         }
813
814         public Object JavaDoc nextElement() throws NoSuchElementException JavaDoc
815         {
816             if( memoryEnumeration != null)
817             {
818                 if( memoryEnumeration.hasMoreElements())
819                     return memoryEnumeration.nextElement();
820                 memoryEnumeration = null;
821             }
822             return diskEnumeration.nextElement();
823         }
824     } // end of class BackingStoreHashtableEnumeration
825
}
826
Popular Tags