
1 /*
2  * Copyright (c) 2002-2003 by OpenSymphony
3  * All rights reserved.
4  */

5 /*
6         File: AbstractConcurrentReadCache
7
8         Written by Doug Lea. Adapted from JDK1.2 HashMap.java and Hashtable.java
9         which carries the following copyright:
10
11                  * Copyright 1997 by Sun Microsystems, Inc.,
12                  * 901 San Antonio Road, Palo Alto, California, 94303, U.S.A.
13                  * All rights reserved.
14                  *
15                  * This software is the confidential and proprietary information
16                  * of Sun Microsystems, Inc. ("Confidential Information"). You
17                  * shall not disclose such Confidential Information and shall use
18                  * it only in accordance with the terms of the license agreement
19                  * you entered into with Sun.
20
21         This class is a modified version of ConcurrentReaderHashMap, which was written
22         by Doug Lea (http://gee.cs.oswego.edu/dl/). The modifications were done
23         by Pyxis Technologies. This is a base class for the OSCache module of the
24         OpenSymphony project (www.opensymphony.com).
25
26         History:
27         Date Who What
28         28oct1999 dl Created
29         14dec1999 dl jmm snapshot
30         19apr2000 dl use barrierLock
31         12jan2001 dl public release
32         Oct2001 abergevin@pyxis-tech.com
33                                                                 Integrated persistence and outer algorithm support
34 */

35 package com.opensymphony.oscache.base.algorithm;
36
37
38 /** OpenSymphony BEGIN */
39 import com.opensymphony.oscache.base.CacheEntry;
40 import com.opensymphony.oscache.base.persistence.CachePersistenceException;
41 import com.opensymphony.oscache.base.persistence.PersistenceListener;
42
43 import org.apache.commons.logging.Log;
44 import org.apache.commons.logging.LogFactory;
45
46 import java.io.IOException;
47 import java.io.Serializable;
48
49 import java.util.*;
50
51 /**
52  * A version of Hashtable that supports mostly-concurrent reading, but exclusive writing.
53  * Because reads are not limited to periods
54  * without writes, a concurrent reader policy is weaker than a classic
55  * reader/writer policy, but is generally faster and allows more
56  * concurrency. This class is a good choice especially for tables that
57  * are mainly created by one thread during the start-up phase of a
58  * program, and from then on, are mainly read (with perhaps occasional
59  * additions or removals) in many threads. If you also need concurrency
60  * among writes, consider instead using ConcurrentHashMap.
61  * <p>
62  *
63  * Successful retrievals using get(key) and containsKey(key) usually
64  * run without locking. Unsuccessful ones (i.e., when the key is not
65  * present) do involve brief synchronization (locking). Also, the
66  * size and isEmpty methods are always synchronized.
67  *
68  * <p> Because retrieval operations can ordinarily overlap with
69  * writing operations (i.e., put, remove, and their derivatives),
70  * retrievals can only be guaranteed to return the results of the most
71  * recently <em>completed</em> operations holding upon their
72  * onset. Retrieval operations may or may not return results
73  * reflecting in-progress writing operations. However, the retrieval
74  * operations do always return consistent results -- either those
75  * holding before any single modification or after it, but never a
76  * nonsense result. For aggregate operations such as putAll and
77  * clear, concurrent reads may reflect insertion or removal of only
78  * some entries. In those rare contexts in which you use a hash table
79  * to synchronize operations across threads (for example, to prevent
80  * reads until after clears), you should either encase operations
81  * in synchronized blocks, or instead use java.util.Hashtable.
82  *
83  * <p>
84  *
85  * This class also supports optional guaranteed
86  * exclusive reads, simply by surrounding a call within a synchronized
87  * block, as in <br>
88  * <code>AbstractConcurrentReadCache t; ... Object v; <br>
89  * synchronized(t) { v = t.get(k); } </code> <br>
90  *
91  * But this is not usually necessary in practice. For
92  * example, it is generally inefficient to write:
93  *
94  * <pre>
95  *   AbstractConcurrentReadCache t; ...            // Inefficient version
96  *   Object key; ...
97  *   Object value; ...
98  *   synchronized(t) {
99  *     if (!t.containsKey(key)) {
100  *       t.put(key, value);
101  *       // other code if not previously present
102  *     }
103  *     else {
104  *       // other code if it was previously present
105  *     }
106  *   }
107  *</pre>
108  * Instead, just take advantage of the fact that put returns
109  * null if the key was not previously present:
110  * <pre>
111  *   AbstractConcurrentReadCache t; ...            // Use this instead
112  *   Object key; ...
113  *   Object value; ...
114  *   Object oldValue = t.put(key, value);
115  *   if (oldValue == null) {
116  *     // other code if not previously present
117  *   }
118  *   else {
119  *     // other code if it was previously present
120  *   }
121  *</pre>
122  * <p>
123  *
124  * Iterators and Enumerations (i.e., those returned by
125  * keySet().iterator(), entrySet().iterator(), values().iterator(),
126  * keys(), and elements()) return elements reflecting the state of the
127  * hash table at some point at or since the creation of the
128  * iterator/enumeration. They will return at most one instance of
129  * each element (via next()/nextElement()), but might or might not
130  * reflect puts and removes that have been processed since they were
131  * created. They do <em>not</em> throw ConcurrentModificationException.
132  * However, these iterators are designed to be used by only one
133  * thread at a time. Sharing an iterator across multiple threads may
134  * lead to unpredictable results if the table is being concurrently
135  * modified. Again, you can ensure interference-free iteration by
136  * enclosing the iteration in a synchronized block. <p>
137  *
138  * This class may be used as a direct replacement for any use of
139  * java.util.Hashtable that does not depend on readers being blocked
140  * during updates. Like Hashtable but unlike java.util.HashMap,
141  * this class does NOT allow <tt>null</tt> to be used as a key or
142  * value. This class is also typically faster than ConcurrentHashMap
143  * when there is usually only one thread updating the table, but
144  * possibly many retrieving values from it.
145  * <p>
146  *
147  * Implementation note: A slightly faster implementation of
148  * this class will be possible once planned Java Memory Model
149  * revisions are in place.
150  *
151  * <p>[<a HREF="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> Introduction to this package. </a>]
152  **/

153 public abstract class AbstractConcurrentReadCache extends AbstractMap implements Map, Cloneable, Serializable {
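    /*
     * Illustrative usage sketch (not taken from the original source). It assumes a
     * concrete eviction subclass such as this package's LRUCache with a no-argument
     * constructor; any subclass implementing removeItem()/itemPut()/... would do.
     *
     *     AbstractConcurrentReadCache cache = new LRUCache();
     *     cache.setMaxEntries(500);
     *     cache.put("answer", new Integer(42));   // neither key nor value may be null
     *     Object hit = cache.get("answer");       // usually lock-free
     *     Object miss = cache.get("unknown");     // briefly synchronizes, returns null
     */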
154     /**
155      * The default initial number of table slots for this table (32).
156      * Used when not otherwise specified in constructor.
157      **/

158     public static int DEFAULT_INITIAL_CAPACITY = 32;
159
160     /**
161      * The minimum capacity.
162      * Used if a lower value is implicitly specified
163      * by either of the constructors with arguments.
164      * MUST be a power of two.
165      */

166     private static final int MINIMUM_CAPACITY = 4;
167
168     /**
169      * The maximum capacity.
170      * Used if a higher value is implicitly specified
171      * by either of the constructors with arguments.
172      * MUST be a power of two <= 1<<30.
173      */

174     private static final int MAXIMUM_CAPACITY = 1 << 30;
175
176     /**
177      * The default load factor for this table.
178      * Used when not otherwise specified in the constructor; the default is 0.75f.
179      **/

180     public static final float DEFAULT_LOAD_FACTOR = 0.75f;
181
182     //OpenSymphony BEGIN (pretty long!)
183     protected static final String NULL = "_nul!~";
184     protected static Log log = LogFactory.getLog(AbstractConcurrentReadCache.class);
185
186     /*
187       The basic strategy is an optimistic-style scheme based on
188       the guarantee that the hash table and its lists are always
189       kept in a consistent enough state to be read without locking:
190
191       * Read operations first proceed without locking, by traversing the
192          apparently correct list of the apparently correct bin. If an
193          entry is found, but not invalidated (value field null), it is
194          returned. If not found, operations must recheck (after a memory
195          barrier) to make sure they are using both the right list and
196          the right table (which can change under resizes). If
197          invalidated, reads must acquire main update lock to wait out
198          the update, and then re-traverse.
199
200       * All list additions are at the front of each bin, making it easy
201          to check changes, and also fast to traverse. Entry next
202          pointers are never assigned. Remove() builds new nodes when
203          necessary to preserve this.
204
205       * Remove() (also clear()) invalidates removed nodes to alert read
206          operations that they must wait out the full modifications.
207
208     */

209
210     /**
211      * Lock used only for its memory effects. We use a Boolean
212      * because it is serializable, and we create a new one because
213      * we need a unique object for each cache instance.
214      **/

215     protected final Boolean barrierLock = new Boolean(true);
216
217     /**
218      * field written to only to guarantee lock ordering.
219      **/

220     protected transient Object lastWrite;
221
222     /**
223      * The hash table data.
224      */

225     protected transient Entry[] table;
226
227     /**
228      * The total number of mappings in the hash table.
229      */

230     protected transient int count;
231
232     /**
233      * Persistence listener.
234      */

235     protected transient PersistenceListener persistenceListener = null;
236
237     /**
238      * Use memory cache or not.
239      */

240     protected boolean memoryCaching = true;
241
242     /**
243      * Use unlimited disk caching.
244      */

245     protected boolean unlimitedDiskCache = false;
246
247     /**
248      * The load factor for the hash table.
249      *
250      * @serial
251      */

252     protected float loadFactor;
253
254     /**
255      * Default cache capacity (number of entries).
256      */

257     protected final int DEFAULT_MAX_ENTRIES = 100;
258
259     /**
260      * Maximum number of elements in the cache when it is considered unlimited.
261      */

262     protected final int UNLIMITED = 2147483646;
263     protected transient Collection values = null;
264
265     /**
266      * A HashMap containing the group information.
267      * Each entry uses the group name as the key, and holds a
268      * <code>Set</code> containing the keys of all
269      * the cache entries that belong to that particular group.
270      */

271     protected HashMap groups = new HashMap();
272     protected transient Set entrySet = null;
273
274     // Views
275     protected transient Set keySet = null;
276
277     /**
278      * Cache capacity (number of entries).
279      */

280     protected int maxEntries = DEFAULT_MAX_ENTRIES;
281
282     /**
283      * The table is rehashed when its size exceeds this threshold.
284      * (The value of this field is always (int)(capacity * loadFactor).)
285      *
286      * @serial
287      */

288     protected int threshold;
289
290     /**
291      * Use overflow persistence caching.
292      */

293     private boolean overflowPersistence = false;
294
295     /**
296      * Constructs a new, empty map with the specified initial capacity and the specified load factor.
297      *
298      * @param initialCapacity the initial capacity
299      * The actual initial capacity is rounded up to the nearest power of two.
300      * @param loadFactor the load factor of the AbstractConcurrentReadCache
301      * @throws IllegalArgumentException if the initial maximum number
302      * of elements is less
303      * than zero, or if the load factor is nonpositive.
304      */

305     public AbstractConcurrentReadCache(int initialCapacity, float loadFactor) {
306         if (loadFactor <= 0) {
307             throw new IllegalArgumentException("Illegal Load factor: " + loadFactor);
308         }
309
310         this.loadFactor = loadFactor;
311
312         int cap = p2capacity(initialCapacity);
313         table = new Entry[cap];
314         threshold = (int) (cap * loadFactor);
315     }
316
317     /**
318      * Constructs a new, empty map with the specified initial capacity and default load factor.
319      *
320      * @param initialCapacity the initial capacity of the
321      * AbstractConcurrentReadCache.
322      * @throws IllegalArgumentException if the initial maximum number
323      * of elements is less
324      * than zero.
325      */

326     public AbstractConcurrentReadCache(int initialCapacity) {
327         this(initialCapacity, DEFAULT_LOAD_FACTOR);
328     }
329
330     /**
331      * Constructs a new, empty map with a default initial capacity and load factor.
332      */

333     public AbstractConcurrentReadCache() {
334         this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
335     }
336
337     /**
338      * Constructs a new map with the same mappings as the given map.
339      * The map is created with a capacity of twice the number of mappings in
340      * the given map or 11 (whichever is greater), and a default load factor.
341      */

342     public AbstractConcurrentReadCache(Map t) {
343         this(Math.max(2 * t.size(), 11), DEFAULT_LOAD_FACTOR);
344         putAll(t);
345     }
346
347     /**
348      * Returns <tt>true</tt> if this map contains no key-value mappings.
349      *
350      * @return <tt>true</tt> if this map contains no key-value mappings.
351      */

352     public synchronized boolean isEmpty() {
353         return count == 0;
354     }
355
356     /**
357      * Returns a set of the cache keys that reside in a particular group.
358      *
359      * @param groupName The name of the group to retrieve.
360      * @return a set containing all of the keys of cache entries that belong
361      * to this group, or <code>null</code> if the group was not found.
362      * @exception NullPointerException if the groupName is <code>null</code>.
363      */

364     public Set getGroup(String groupName) {
365         if (log.isDebugEnabled()) {
366             log.debug("getGroup called (group=" + groupName + ")");
367         }
368
369         Set groupEntries = null;
370
371         if (memoryCaching && (groups != null)) {
372             groupEntries = (Set) getGroupForReading(groupName);
373         }
374
375         if (groupEntries == null) {
376             // Not in the map, try the persistence layer
377             groupEntries = persistRetrieveGroup(groupName);
378         }
379
380         return groupEntries;
381     }
382
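    /*
     * Illustrative sketch (not from the original source): reading the keys of a group.
     * Group membership is populated when CacheEntry values carrying group names are
     * put into the cache; the group name and "cache" variable here are hypothetical.
     *
     *     Set keys = cache.getGroup("reports");
     *     if (keys != null) {
     *         for (Iterator it = keys.iterator(); it.hasNext();) {
     *             Object key = it.next();
     *             // look up or flush each member of the group
     *         }
     *     }
     */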
383     /**
384      * Set the cache capacity
385      */

386     public void setMaxEntries(int newLimit) {
387         if (newLimit > 0) {
388             maxEntries = newLimit;
389
390             synchronized (this) { // because remove() isn't synchronized
391

392                 while (size() > maxEntries) {
393                     remove(removeItem(), false, false);
394                 }
395             }
396         } else {
397             // Capacity must be at least 1
398             throw new IllegalArgumentException("Cache maximum number of entries must be at least 1");
399         }
400     }
401
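    /*
     * Illustrative sketch (not from the original source): lowering the capacity below
     * the current size evicts entries immediately, using the subclass's removeItem()
     * policy (LRU, FIFO, ...), until size() is no larger than the new limit.
     *
     *     cache.setMaxEntries(1000);
     *     // ... cache fills up ...
     *     cache.setMaxEntries(100);   // evicts down to 100 entries right away
     *     cache.setMaxEntries(0);     // throws IllegalArgumentException
     */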
402     /**
403      * Retrieve the cache capacity (number of entries).
404      */

405     public int getMaxEntries() {
406         return maxEntries;
407     }
408
409     /**
410      * Sets the memory caching flag.
411      */

412     public void setMemoryCaching(boolean memoryCaching) {
413         this.memoryCaching = memoryCaching;
414     }
415
416     /**
417      * Check if memory caching is used.
418      */

419     public boolean isMemoryCaching() {
420         return memoryCaching;
421     }
422
423     /**
424      * Set the persistence listener to use.
425      */

426     public void setPersistenceListener(PersistenceListener listener) {
427         this.persistenceListener = listener;
428     }
429
430     /**
431      * Get the persistence listener.
432      */

433     public PersistenceListener getPersistenceListener() {
434         return persistenceListener;
435     }
436
437     /**
438      * Sets the unlimited disk caching flag.
439      */

440     public void setUnlimitedDiskCache(boolean unlimitedDiskCache) {
441         this.unlimitedDiskCache = unlimitedDiskCache;
442     }
443
444     /**
445      * Check if we use unlimited disk cache.
446      */

447     public boolean isUnlimitedDiskCache() {
448         return unlimitedDiskCache;
449     }
450
451     /**
452      * Check if we use overflowPersistence
453      *
454      * @return Returns the overflowPersistence.
455      */

456     public boolean isOverflowPersistence() {
457         return this.overflowPersistence;
458     }
459
460     /**
461      * Sets the overflowPersistence flag
462      *
463      * @param overflowPersistence The overflowPersistence to set.
464      */

465     public void setOverflowPersistence(boolean overflowPersistence) {
466         this.overflowPersistence = overflowPersistence;
467     }
468
469     /**
470      * Return the number of slots in this table.
471      **/

472     public synchronized int capacity() {
473         return table.length;
474     }
475
476     /**
477      * Removes all mappings from this map.
478      */

479     public synchronized void clear() {
480         Entry[] tab = table;
481
482         for (int i = 0; i < tab.length; ++i) {
483             // must invalidate all to force concurrent get's to wait and then retry
484             for (Entry e = tab[i]; e != null; e = e.next) {
485                 e.value = null;
486
487                 /** OpenSymphony BEGIN */
488                 itemRemoved(e.key);
489
490                 /** OpenSymphony END */
491             }
492
493             tab[i] = null;
494         }
495
496         // Clean out the entire disk cache
497         persistClear();
498
499         count = 0;
500         recordModification(tab);
501     }
502
503     /**
504      * Returns a shallow copy of this
505      * <tt>AbstractConcurrentReadCache</tt> instance: the keys and
506      * values themselves are not cloned.
507      *
508      * @return a shallow copy of this map.
509      */

510     public synchronized Object clone() {
511         try {
512             AbstractConcurrentReadCache t = (AbstractConcurrentReadCache) super.clone();
513             t.keySet = null;
514             t.entrySet = null;
515             t.values = null;
516
517             Entry[] tab = table;
518             t.table = new Entry[tab.length];
519
520             Entry[] ttab = t.table;
521
522             for (int i = 0; i < tab.length; ++i) {
523                 Entry first = tab[i];
524
525                 if (first != null) {
526                     ttab[i] = (Entry) (first.clone());
527                 }
528             }
529
530             return t;
531         } catch (CloneNotSupportedException e) {
532             // this shouldn't happen, since we are Cloneable
533             throw new InternalError();
534         }
535     }
536
537     /**
538      * Tests if some key maps into the specified value in this table.
539      * This operation is more expensive than the <code>containsKey</code>
540      * method.<p>
541      *
542      * Note that this method is identical in functionality to containsValue,
543      * (which is part of the Map interface in the collections framework).
544      *
545      * @param value a value to search for.
546      * @return <code>true</code> if and only if some key maps to the
547      * <code>value</code> argument in this table as
548      * determined by the <tt>equals</tt> method;
549      * <code>false</code> otherwise.
550      * @exception NullPointerException if the value is <code>null</code>.
551      * @see #containsKey(Object)
552      * @see #containsValue(Object)
553      * @see Map
554      */

555     public boolean contains(Object value) {
556         return containsValue(value);
557     }
558
559     /**
560      * Tests if the specified object is a key in this table.
561      *
562      * @param key possible key.
563      * @return <code>true</code> if and only if the specified object
564      * is a key in this table, as determined by the
565      * <tt>equals</tt> method; <code>false</code> otherwise.
566      * @exception NullPointerException if the key is
567      * <code>null</code>.
568      * @see #contains(Object)
569      */

570     public boolean containsKey(Object key) {
571         return get(key) != null;
572
573         /** OpenSymphony BEGIN */
574
575         // TODO: Also check the persistence?
576

577         /** OpenSymphony END */
578     }
579
580     /**
581      * Returns <tt>true</tt> if this map maps one or more keys to the
582      * specified value. Note: This method requires a full internal
583      * traversal of the hash table, and so is much slower than
584      * method <tt>containsKey</tt>.
585      *
586      * @param value value whose presence in this map is to be tested.
587      * @return <tt>true</tt> if this map maps one or more keys to the
588      * specified value.
589      * @exception NullPointerException if the value is <code>null</code>.
590      */

591     public boolean containsValue(Object value) {
592         if (value == null) {
593             throw new NullPointerException();
594         }
595
596         Entry[] tab = getTableForReading();
597
598         for (int i = 0; i < tab.length; ++i) {
599             for (Entry e = tab[i]; e != null; e = e.next) {
600                 Object v = e.value;
601
602                 if ((v != null) && value.equals(v)) {
603                     return true;
604                 }
605             }
606         }
607
608         return false;
609     }
610
611     /**
612      * Returns an enumeration of the values in this table.
613      * Use the Enumeration methods on the returned object to fetch the elements
614      * sequentially.
615      *
616      * @return an enumeration of the values in this table.
617      * @see java.util.Enumeration
618      * @see #keys()
619      * @see #values()
620      * @see Map
621      */

622     public Enumeration elements() {
623         return new ValueIterator();
624     }
625
626     /**
627      * Returns a collection view of the mappings contained in this map.
628      * Each element in the returned collection is a <tt>Map.Entry</tt>. The
629      * collection is backed by the map, so changes to the map are reflected in
630      * the collection, and vice-versa. The collection supports element
631      * removal, which removes the corresponding mapping from the map, via the
632      * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
633      * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
634      * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
635      *
636      * @return a collection view of the mappings contained in this map.
637      */

638     public Set entrySet() {
639         Set es = entrySet;
640
641         if (es != null) {
642             return es;
643         } else {
644             return entrySet = new AbstractSet() {
645                         public Iterator iterator() {
646                             return new HashIterator();
647                         }
648
649                         public boolean contains(Object o) {
650                             if (!(o instanceof Map.Entry)) {
651                                 return false;
652                             }
653
654                             Map.Entry entry = (Map.Entry) o;
655                             Object key = entry.getKey();
656                             Object v = AbstractConcurrentReadCache.this.get(key);
657
658                             return (v != null) && v.equals(entry.getValue());
659                         }
660
661                         public boolean remove(Object o) {
662                             if (!(o instanceof Map.Entry)) {
663                                 return false;
664                             }
665
666                             return AbstractConcurrentReadCache.this.findAndRemoveEntry((Map.Entry) o);
667                         }
668
669                         public int size() {
670                             return AbstractConcurrentReadCache.this.size();
671                         }
672
673                         public void clear() {
674                             AbstractConcurrentReadCache.this.clear();
675                         }
676                     };
677         }
678     }
679
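    /*
     * Illustrative sketch (not from the original source): iterating the entry view.
     * The iterator never throws ConcurrentModificationException, but it is intended
     * for a single thread; wrap the loop in synchronized(cache) if other threads may
     * be writing and a consistent snapshot is required ("cache" is hypothetical).
     *
     *     for (Iterator it = cache.entrySet().iterator(); it.hasNext();) {
     *         Map.Entry entry = (Map.Entry) it.next();
     *         System.out.println(entry.getKey() + " -> " + entry.getValue());
     *     }
     */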
680     /**
681      * Returns the value to which the specified key is mapped in this table.
682      *
683      * @param key a key in the table.
684      * @return the value to which the key is mapped in this table;
685      * <code>null</code> if the key is not mapped to any value in
686      * this table.
687      * @exception NullPointerException if the key is
688      * <code>null</code>.
689      * @see #put(Object, Object)
690      */

691     public Object get(Object key) {
692         if (log.isDebugEnabled()) {
693             log.debug("get called (key=" + key + ")");
694         }
695
696         // throw null pointer exception if key null
697         int hash = hash(key);
698
699         /*
700            Start off at the apparently correct bin. If entry is found, we
701            need to check after a barrier anyway. If not found, we need a
702            barrier to check if we are actually in right bin. So either
703            way, we encounter only one barrier unless we need to retry.
704            And we only need to fully synchronize if there have been
705            concurrent modifications.
706         */

707         Entry[] tab = table;
708         int index = hash & (tab.length - 1);
709         Entry first = tab[index];
710         Entry e = first;
711
712         for (;;) {
713             if (e == null) {
714                 // If key apparently not there, check to
715                 // make sure this was a valid read
716                 tab = getTableForReading();
717
718                 if (first == tab[index]) {
719                     /** OpenSymphony BEGIN */
720
721                     /* Previous code
722                     return null;*/

723
724                     // Not in the table, try persistence
725                     Object value = persistRetrieve(key);
726
727                     if (value != null) {
728                         // Update the map, but don't persist the data
729                         put(key, value, false);
730                     }
731
732                     return value;
733
734                     /** OpenSymphony END */
735                 } else {
736                     // Wrong list -- must restart traversal at new first
737                     e = first = tab[index = hash & (tab.length - 1)];
738                 }
739             }
740             // checking for pointer equality first wins in most applications
741             else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
742                 Object value = e.value;
743
744                 if (value != null) {
745                     /** OpenSymphony BEGIN */
746
747                     /* Previous code
748                     return value;*/

749                     if (NULL.equals(value)) {
750                         // Memory cache disabled, use disk
751                         value = persistRetrieve(e.key);
752
753                         if (value != null) {
754                             itemRetrieved(key);
755                         }
756
757                         return value; // fix [CACHE-13]
758                     } else {
759                         itemRetrieved(key);
760
761                         return value;
762                     }
763
764                     /** OpenSymphony END */
765                 }
766
767                 // Entry was invalidated during deletion. But it could
768                 // have been re-inserted, so we must retraverse.
769                 // To avoid useless contention, get lock to wait out modifications
770                 // before retraversing.
771                 synchronized (this) {
772                     tab = table;
773                 }
774
775                 e = first = tab[index = hash & (tab.length - 1)];
776             } else {
777                 e = e.next;
778             }
779         }
780     }
781
782     /**
783      * Returns a set view of the keys contained in this map.
784      * The set is backed by the map, so changes to the map are reflected in the set, and
785      * vice-versa. The set supports element removal, which removes the
786      * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
787      * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
788      * <tt>clear</tt> operations. It does not support the <tt>add</tt> or
789      * <tt>addAll</tt> operations.
790      *
791      * @return a set view of the keys contained in this map.
792      */

793     public Set keySet() {
794         Set ks = keySet;
795
796         if (ks != null) {
797             return ks;
798         } else {
799             return keySet = new AbstractSet() {
800                         public Iterator iterator() {
801                             return new KeyIterator();
802                         }
803
804                         public int size() {
805                             return AbstractConcurrentReadCache.this.size();
806                         }
807
808                         public boolean contains(Object o) {
809                             return AbstractConcurrentReadCache.this.containsKey(o);
810                         }
811
812                         public boolean remove(Object o) {
813                             return AbstractConcurrentReadCache.this.remove(o) != null;
814                         }
815
816                         public void clear() {
817                             AbstractConcurrentReadCache.this.clear();
818                         }
819                     };
820         }
821     }
822
823     /**
824      * Returns an enumeration of the keys in this table.
825      *
826      * @return an enumeration of the keys in this table.
827      * @see Enumeration
828      * @see #elements()
829      * @see #keySet()
830      * @see Map
831      */

832     public Enumeration keys() {
833         return new KeyIterator();
834     }
835
836     /**
837      * Return the load factor
838      **/

839     public float loadFactor() {
840         return loadFactor;
841     }
842
843     /**
844      * Maps the specified <code>key</code> to the specified <code>value</code> in this table.
845      * Neither the key nor the
846      * value can be <code>null</code>. <p>
847      *
848      * The value can be retrieved by calling the <code>get</code> method
849      * with a key that is equal to the original key.
850      *
851      * @param key the table key.
852      * @param value the value.
853      * @return the previous value of the specified key in this table,
854      * or <code>null</code> if it did not have one.
855      * @exception NullPointerException if the key or value is
856      * <code>null</code>.
857      * @see Object#equals(Object)
858      * @see #get(Object)
859      */

860     /** OpenSymphony BEGIN */
861     public Object put(Object key, Object value) {
862         // Call the internal put using persistence
863         return put(key, value, true);
864     }
865
866     /**
867      * Copies all of the mappings from the specified map to this one.
868      *
869      * These mappings replace any mappings that this map had for any of the
870      * keys currently in the specified Map.
871      *
872      * @param t Mappings to be stored in this map.
873      */

874     public synchronized void putAll(Map t) {
875         for (Iterator it = t.entrySet().iterator(); it.hasNext();) {
876             Map.Entry entry = (Map.Entry) it.next();
877             Object key = entry.getKey();
878             Object value = entry.getValue();
879             put(key, value);
880         }
881     }
882
883     /**
884      * Removes the key (and its corresponding value) from this table.
885      * This method does nothing if the key is not in the table.
886      *
887      * @param key the key that needs to be removed.
888      * @return the value to which the key had been mapped in this table,
889      * or <code>null</code> if the key did not have a mapping.
890      * @exception NullPointerException if the key is
891      * <code>null</code>.
892      */

893     /** OpenSymphony BEGIN */
894     public Object remove(Object key) {
895         return remove(key, true, false);
896     }
897
898     /**
899      * Like <code>remove(Object)</code>, but ensures that the entry will be removed from the persistent store, too,
900      * even if overflowPersistence or unlimitedDiskCache are true.
901      *
902      * @param key the key that needs to be removed.
903      * @return the previous value mapped to the key, or <code>null</code> if none.
904      */

905     public Object removeForce(Object key) {
906       return remove(key, true, true);
907     }
908
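    /*
     * Illustrative sketch (not from the original source): per the javadoc above, when
     * unlimitedDiskCache or overflowPersistence is enabled, remove(key) may leave the
     * entry in the persistent store, whereas removeForce(key) always deletes it there
     * as well ("cache" is a hypothetical instance).
     *
     *     cache.setUnlimitedDiskCache(true);
     *     cache.remove("stale");        // gone from memory, may remain on disk
     *     cache.removeForce("stale");   // gone from memory and from the persistent store
     */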
909     /**
910      * Returns the total number of cache entries held in this map.
911      *
912      * @return the number of key-value mappings in this map.
913      */

914     public synchronized int size() {
915         return count;
916     }
917
918     /**
919      * Returns a collection view of the values contained in this map.
920      * The collection is backed by the map, so changes to the map are reflected in
921      * the collection, and vice-versa. The collection supports element
922      * removal, which removes the corresponding mapping from this map, via the
923      * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
924      * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
925      * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
926      *
927      * @return a collection view of the values contained in this map.
928      */

929     public Collection values() {
930         Collection vs = values;
931
932         if (vs != null) {
933             return vs;
934         } else {
935             return values = new AbstractCollection() {
936                         public Iterator iterator() {
937                             return new ValueIterator();
938                         }
939
940                         public int size() {
941                             return AbstractConcurrentReadCache.this.size();
942                         }
943
944                         public boolean contains(Object o) {
945                             return AbstractConcurrentReadCache.this.containsValue(o);
946                         }
947
948                         public void clear() {
949                             AbstractConcurrentReadCache.this.clear();
950                         }
951                     };
952         }
953     }
954
955     /**
956      * Get ref to group.
957      * CACHE-127 Synchronized copying of the group entry set since
958      * the new HashSet(Collection c) constructor uses the iterator.
959      * This may slow things down but it is better than a
960      * ConcurrentModificationException. We might have to revisit the
961      * code if performance is too adversely impacted.
962      **/

963     protected synchronized final Set getGroupForReading(String groupName) {
964         Set group = (Set) getGroupsForReading().get(groupName);
965         if (group == null) return null;
966         return new HashSet(group);
967     }
968
969     /**
970      * Get ref to groups.
971      * The reference and the cells it
972      * accesses will be at least as fresh as from last
973      * use of barrierLock
974      **/

975     protected final Map getGroupsForReading() {
976         synchronized (barrierLock) {
977             return groups;
978         }
979     }
980
981     /**
982      * Get ref to table; the reference and the cells it
983      * accesses will be at least as fresh as from last
984      * use of barrierLock
985      **/

986     protected final Entry[] getTableForReading() {
987         synchronized (barrierLock) {
988             return table;
989         }
990     }
991
992     /**
993      * Force a memory synchronization that will cause
994      * all readers to see table. Call only when already
995      * holding main synch lock.
996      **/

997     protected final void recordModification(Object x) {
998         synchronized (barrierLock) {
999             lastWrite = x;
1000        }
1001    }
1002
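    /*
     * Illustrative sketch (not from the original source) of the barrierLock idiom the
     * comments above describe: writers publish a new table (or groups map) while
     * holding the main lock and then call recordModification(); lock-free readers that
     * need a fresh reference re-read it under barrierLock, so the synchronized block
     * acts as the memory barrier.
     *
     *     // writer (already holding the main synch lock)
     *     table = newTable;
     *     recordModification(newTable);
     *
     *     // reader that must not see a stale reference
     *     Entry[] tab = getTableForReading();   // synchronized (barrierLock)
     */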
1003    /**
1004     * Helper method for entrySet remove.
1005     **/

1006    protected synchronized boolean findAndRemoveEntry(Map.Entry entry) {
1007        Object key = entry.getKey();
1008        Object v = get(key);
1009
1010        if ((v != null) && v.equals(entry.getValue())) {
1011            remove(key);
1012
1013            return true;
1014        } else {
1015            return false;
1016        }
1017    }
1018
1019    /**
1020     * Remove an object from the persistence.
1021     * @param key The key of the object to remove
1022     */

1023    protected void persistRemove(Object key) {
1024        if (log.isDebugEnabled()) {
1025            log.debug("PersistRemove called (key=" + key + ")");
1026        }
1027
1028        if (persistenceListener != null) {
1029            try {
1030                persistenceListener.remove((String) key);
1031            } catch (CachePersistenceException e) {
1032                log.error("[oscache] Exception removing cache entry with key '" + key + "' from persistence", e);
1033            }
1034        }
1035    }
1036
1037    /**
1038     * Removes a cache group using the persistence listener.
1039     * @param groupName The name of the group to remove
1040     */

1041    protected void persistRemoveGroup(String groupName) {
1042        if (log.isDebugEnabled()) {
1043            log.debug("persistRemoveGroup called (groupName=" + groupName + ")");
1044        }
1045
1046        if (persistenceListener != null) {
1047            try {
1048                persistenceListener.removeGroup(groupName);
1049            } catch (CachePersistenceException e) {
1050                log.error("[oscache] Exception removing group " + groupName, e);
1051            }
1052        }
1053    }
1054
1055    /**
1056     * Retrieve an object from the persistence listener.
1057     * @param key The key of the object to retrieve
1058     */

1059    protected Object persistRetrieve(Object key) {
1060        if (log.isDebugEnabled()) {
1061            log.debug("persistRetrieve called (key=" + key + ")");
1062        }
1063
1064        Object entry = null;
1065
1066        if (persistenceListener != null) {
1067            try {
1068                entry = persistenceListener.retrieve((String) key);
1069            } catch (CachePersistenceException e) {
1070                /**
1071                 * It is normal that we get an exception occasionally.
1072                 * It happens when the item is invalidated (written or removed)
1073                 * during read. The logic is constructed so that read is retried.
1074                 */

1075            }
1076        }
1077
1078        return entry;
1079    }
1080
1081    /**
1082     * Retrieves a cache group using the persistence listener.
1083     * @param groupName The name of the group to retrieve
1084     */

1085    protected Set persistRetrieveGroup(String groupName) {
1086        if (log.isDebugEnabled()) {
1087            log.debug("persistRetrieveGroup called (groupName=" + groupName + ")");
1088        }
1089
1090        if (persistenceListener != null) {
1091            try {
1092                return persistenceListener.retrieveGroup(groupName);
1093            } catch (CachePersistenceException e) {
1094                log.error("[oscache] Exception retrieving group " + groupName, e);
1095            }
1096        }
1097
1098        return null;
1099    }
1100
1101    /**
1102     * Store an object in the cache using the persistence listener.
1103     * @param key The object key
1104     * @param obj The object to store
1105     */

1106    protected void persistStore(Object key, Object obj) {
1107        if (log.isDebugEnabled()) {
1108            log.debug("persistStore called (key=" + key + ")");
1109        }
1110
1111        if (persistenceListener != null) {
1112            try {
1113                persistenceListener.store((String) key, obj);
1114            } catch (CachePersistenceException e) {
1115                log.error("[oscache] Exception persisting " + key, e);
1116            }
1117        }
1118    }
1119
1120    /**
1121     * Creates or Updates a cache group using the persistence listener.
1122     * @param groupName The name of the group to update
1123     * @param group The entries for the group
1124     */

1125    protected void persistStoreGroup(String groupName, Set group) {
1126        if (log.isDebugEnabled()) {
1127            log.debug("persistStoreGroup called (groupName=" + groupName + ")");
1128        }
1129
1130        if (persistenceListener != null) {
1131            try {
1132                if ((group == null) || group.isEmpty()) {
1133                    persistenceListener.removeGroup(groupName);
1134                } else {
1135                    persistenceListener.storeGroup(groupName, group);
1136                }
1137            } catch (CachePersistenceException e) {
1138                log.error("[oscache] Exception persisting group " + groupName, e);
1139            }
1140        }
1141    }
1142
1143    /**
1144     * Removes the entire cache from persistent storage.
1145     */

1146    protected void persistClear() {
1147        if (log.isDebugEnabled()) {
1148            log.debug("persistClear called");
1149
1150        }
1151
1152        if (persistenceListener != null) {
1153            try {
1154                persistenceListener.clear();
1155            } catch (CachePersistenceException e) {
1156                log.error("[oscache] Exception clearing persistent cache", e);
1157            }
1158        }
1159    }
1160
1161    /**
1162     * Notify the underlying implementation that an item was put in the cache.
1163     *
1164     * @param key The cache key of the item that was put.
1165     */

1166    protected abstract void itemPut(Object key);
1167
1168    /**
1169     * Notify any underlying algorithm that an item has been retrieved from the cache.
1170     *
1171     * @param key The cache key of the item that was retrieved.
1172     */

1173    protected abstract void itemRetrieved(Object key);
1174
1175    /**
1176     * Notify the underlying implementation that an item was removed from the cache.
1177     *
1178     * @param key The cache key of the item that was removed.
1179     */

1180    protected abstract void itemRemoved(Object key);
1181
1182    /**
1183     * The cache has reached its capacity and an item needs to be removed
1184     * (typically according to an algorithm such as LRU or FIFO).
1185     *
1186     * @return The key of whichever item was removed.
1187     */

1188    protected abstract Object removeItem();
1189
1190    /**
1191     * Reconstitute the <tt>AbstractConcurrentReadCache</tt>
1192     * instance from a stream (i.e.,
1193     * deserialize it).
1194     */

1195    private synchronized void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
1196        // Read in the threshold, loadfactor, and any hidden stuff
1197        s.defaultReadObject();
1198
1199        // Read in number of buckets and allocate the bucket array;
1200        int numBuckets = s.readInt();
1201        table = new Entry[numBuckets];
1202
1203        // Read in size (number of Mappings)
1204        int size = s.readInt();
1205
1206        // Read the keys and values, and put the mappings in the table
1207        for (int i = 0; i < size; i++) {
1208            Object key = s.readObject();
1209            Object value = s.readObject();
1210            put(key, value);
1211        }
1212    }
1213
1214    /**
1215     * Rehashes the contents of this map into a new table with a larger capacity.
1216     * This method is called automatically when the
1217     * number of keys in this map exceeds its capacity and load factor.
1218     */

1219    protected void rehash() {
1220        Entry[] oldMap = table;
1221        int oldCapacity = oldMap.length;
1222
1223        if (oldCapacity >= MAXIMUM_CAPACITY) {
1224            return;
1225        }
1226
1227        int newCapacity = oldCapacity << 1;
1228        Entry[] newMap = new Entry[newCapacity];
1229        threshold = (int) (newCapacity * loadFactor);
1230
1231        /*
1232          We need to guarantee that any existing reads of oldMap can
1233          proceed. So we cannot yet null out each oldMap bin.
1234
1235          Because we are using power-of-two expansion, the elements
1236          from each bin must either stay at same index, or move
1237          to oldCapacity+index. We also minimize new node creation by
1238          catching cases where old nodes can be reused because their
1239          .next fields won't change. (This is checked only for sequences
1240          of one and two. It is not worth checking longer ones.)
1241        */

1242        for (int i = 0; i < oldCapacity; ++i) {
1243            Entry l = null;
1244            Entry h = null;
1245            Entry e = oldMap[i];
1246
1247            while (e != null) {
1248                int hash = e.hash;
1249                Entry next = e.next;
1250
1251                if ((hash & oldCapacity) == 0) {
1252                    // stays at newMap[i]
1253                    if (l == null) {
1254                        // try to reuse node
1255                        if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) == 0))) {
1256                            l = e;
1257
1258                            break;
1259                        }
1260                    }
1261
1262                    l = new Entry(hash, e.key, e.value, l);
1263                } else {
1264                    // moves to newMap[oldCapacity+i]
1265                    if (h == null) {
1266                        if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) != 0))) {
1267                            h = e;
1268
1269                            break;
1270                        }
1271                    }
1272
1273                    h = new Entry(hash, e.key, e.value, h);
1274                }
1275
1276                e = next;
1277            }
1278
1279            newMap[i] = l;
1280            newMap[oldCapacity + i] = h;
1281        }
1282
1283        table = newMap;
1284        recordModification(newMap);
1285    }
1286
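    /*
     * Illustrative worked example (not from the original source): with oldCapacity = 16
     * doubling to 32, an entry whose hash is 21 sat in bin 21 & 15 = 5; since
     * 21 & 16 != 0 it moves to bin 5 + 16 = 21, while an entry with hash 69
     * (69 & 16 == 0, 69 & 15 = 5) stays in bin 5. Readers traversing the old bins are
     * unaffected because the old nodes are never mutated.
     */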
1287    /**
1288     * Continuation of put(), called only when synch lock is
1289     * held and interference has been detected.
1290     **/

1291    /** OpenSymphony BEGIN */
1292
1293    /* Previous code
1294    protected Object sput(Object key, Object value, int hash) {*/

1295    protected Object sput(Object key, Object value, int hash, boolean persist) {
1296        /** OpenSymphony END */
1297        Entry[] tab = table;
1298        int index = hash & (tab.length - 1);
1299        Entry first = tab[index];
1300        Entry e = first;
1301
1302        for (;;) {
1303            if (e == null) {
1304                /** OpenSymphony BEGIN */
1305
1306                // Previous code
1307                // Entry newEntry = new Entry(hash, key, value, first);
1308                Entry newEntry;
1309
1310                if (memoryCaching) {
1311                    newEntry = new Entry(hash, key, value, first);
1312                } else {
1313                    newEntry = new Entry(hash, key, NULL, first);
1314                }
1315
1316                itemPut(key);
1317
1318                // Persist if required
1319                if (persist && !overflowPersistence) {
1320                    persistStore(key, value);
1321                }
1322
1323                // If we have a CacheEntry, update the group lookups
1324                if (value instanceof CacheEntry) {
1325                    updateGroups(null, (CacheEntry) value, persist);
1326                }
1327
1328                /** OpenSymphony END */
1329                tab[index] = newEntry;
1330
1331                if (++count >= threshold) {
1332                    rehash();
1333                } else {
1334                    recordModification(newEntry);
1335                }
1336
1337                return null;
1338            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
1339                Object oldValue = e.value;
1340
1341                /** OpenSymphony BEGIN */
1342
1343                /* Previous code
1344                e.value = value; */

1345                if (memoryCaching) {
1346                    e.value = value;
1347                }
1348
1349                // Persist if required
1350                if (persist && overflowPersistence) {
1351                    persistRemove(key);
1352                } else if (persist) {
1353                    persistStore(key, value);
1354                }
1355
1356                updateGroups(oldValue, value, persist);
1357
1358                itemPut(key);
1359
1360                /** OpenSymphony END */
1361                return oldValue;
1362            } else {
1363                e = e.next;
1364            }
1365        }
1366    }
1367
1368    /**
1369     * Continuation of remove(), called only when synch lock is
1370     * held and interference has been detected.
1371     **/

1372    /** OpenSymphony BEGIN */
1373
1374    /* Previous code
1375    protected Object sremove(Object key, int hash) { */

1376    protected Object sremove(Object key, int hash, boolean invokeAlgorithm) {
1377        /** OpenSymphony END */
1378        Entry[] tab = table;
1379        int index = hash & (tab.length - 1);
1380        Entry first = tab[index];
1381        Entry e = first;
1382
1383        for (;;) {
1384            if (e == null) {
1385                return null;
1386            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
1387                Object oldValue = e.value;
1388                if (persistenceListener != null && (oldValue == NULL)) {
1389                  oldValue = persistRetrieve(key);
1390                }
1391
1392                e.value = null;
1393                count--;
1394
1395                /** OpenSymphony BEGIN */
1396                if (!unlimitedDiskCache && !overflowPersistence) {
1397                    persistRemove(e.key);
1398                    // If we have a CacheEntry, update the groups
1399                    if (oldValue instanceof CacheEntry) {
1400                      CacheEntry oldEntry = (CacheEntry)oldValue;
1401                      removeGroupMappings(oldEntry.getKey(),
1402                          oldEntry.getGroups(), true);
1403                }
1404                } else {
1405                  // only remove from memory groups
1406                  if (oldValue instanceof CacheEntry) {
1407                    CacheEntry oldEntry = (CacheEntry)oldValue;
1408                    removeGroupMappings(oldEntry.getKey(),
1409                        oldEntry.getGroups(), false);
1410                  }
1411                }
1412
1413                if (overflowPersistence && ((size() + 1) >= maxEntries)) {
1414                    persistStore(key, oldValue);
1415                    // add key to persistent groups but NOT to the memory groups
1416                    if (oldValue instanceof CacheEntry) {
1417                      CacheEntry oldEntry = (CacheEntry)oldValue;
1418                      addGroupMappings(oldEntry.getKey(), oldEntry.getGroups(), true, false);
1419                    }
1420                }
1421
1422                if (invokeAlgorithm) {
1423                    itemRemoved(key);
1424                }
1425
1426                /** OpenSymphony END */
1427                Entry head = e.next;
1428
1429                for (Entry p = first; p != e; p = p.next) {
1430                    head = new Entry(p.hash, p.key, p.value, head);
1431                }
1432
1433                tab[index] = head;
1434                recordModification(head);
1435
1436                return oldValue;
1437            } else {
1438                e = e.next;
1439            }
1440        }
1441    }
1442
1443    /**
1444     * Save the state of the <tt>AbstractConcurrentReadCache</tt> instance to a stream.
1445     * (i.e., serialize it).
1446     *
1447     * @serialData The <i>capacity</i> of the
1448     * AbstractConcurrentReadCache (the length of the
1449     * bucket array) is emitted (int), followed by the
1450     * <i>size</i> of the AbstractConcurrentReadCache (the number of key-value
1451     * mappings), followed by the key (Object) and value (Object)
1452     * for each key-value mapping represented by the AbstractConcurrentReadCache.
1453     * The key-value mappings are emitted in no particular order.
1454     */

1455    private synchronized void writeObject(java.io.ObjectOutputStream s) throws IOException {
1456        // Write out the threshold, loadfactor, and any hidden stuff
1457        s.defaultWriteObject();
1458
1459        // Write out number of buckets
1460        s.writeInt(table.length);
1461
1462        // Write out size (number of Mappings)
1463        s.writeInt(count);
1464
1465        // Write out keys and values (alternating)
1466        for (int index = table.length - 1; index >= 0; index--) {
1467            Entry entry = table[index];
1468
1469            while (entry != null) {
1470                s.writeObject(entry.key);
1471                s.writeObject(entry.value);
1472                entry = entry.next;
1473            }
1474        }
1475    }
1476
1477    /**
1478     * Return hash code for Object x.
1479     * Since we are using power-of-two
1480     * tables, it is worth the effort to improve hashcode via
1481     * the same multiplicative scheme as used in IdentityHashMap.
1482     */

1483    private static int hash(Object x) {
1484        int h = x.hashCode();
1485
1486        // Multiply by 127 (quickly, via shifts), and mix in some high
1487        // bits to help guard against bunching of codes that are
1488        // consecutive or equally spaced.
1489        return ((h << 7) - h + (h >>> 9) + (h >>> 17));
1490    }
1491
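    /*
     * Illustrative sketch (not from the original source): how the spread hash is
     * mapped onto a bin. Because the table length is always a power of two, a bitwise
     * AND with (length - 1) is equivalent to a modulo, and this is what put(), get()
     * and remove() use to pick the bucket.
     *
     *     int h = hash(key);
     *     int index = h & (table.length - 1);
     *     Entry first = table[index];
     */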
1492    /**
1493     * Add this cache key to the specified groups.
1494     * We have to treat the
1495     * memory and disk group mappings separately so they remain valid for their
1496     * corresponding memory/disk caches. (eg if mem is limited to 100 entries
1497     * and disk is unlimited, the group mappings will be different).
1498     *
1499     * @param key The cache key that we are adding to the groups.
1500     * @param newGroups the set of groups we want to add this cache entry to.
1501     * @param persist A flag to indicate whether the keys should be added to
1502     * the persistent cache layer.
1503     * @param memory A flag to indicate whether the key should be added to
1504     * the memory groups (important for overflow-to-disk)
1505     */

1506    private void addGroupMappings(String key, Set newGroups, boolean persist, boolean memory) {
1507        // Add this CacheEntry to the groups that it is now a member of
1508        for (Iterator it = newGroups.iterator(); it.hasNext();) {
1509            String groupName = (String) it.next();
1510
1511            // Update the in-memory groups
1512            if (memoryCaching && memory) {
1513                if (groups == null) {
1514                    groups = new HashMap();
1515                }
1516
1517                Set memoryGroup = (Set) groups.get(groupName);
1518
1519                if (memoryGroup == null) {
1520                    memoryGroup = new HashSet();
1521                    groups.put(groupName, memoryGroup);
1522                }
1523
1524                memoryGroup.add(key);
1525            }
1526
1527            // Update the persistent group maps
1528            if (persist) {
1529                Set persistentGroup = persistRetrieveGroup(groupName);
1530
1531                if (persistentGroup == null) {
1532                    persistentGroup = new HashSet();
1533                }
1534
1535                persistentGroup.add(key);
1536                persistStoreGroup(groupName, persistentGroup);
1537            }
1538        }
1539    }
1540
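    /*
     * Sketch of the group bookkeeping above (illustration only): the in-memory
     * "groups" field maps a group name to the set of cache keys belonging to
     * it. For example, after adding key "k1" to groups {"news", "sport"} and
     * key "k2" to {"news"}, the map would look like:
     *
     *     groups = { "news"  -> {"k1", "k2"},
     *                "sport" -> {"k1"} }
     *
     * The persistent group sets are maintained separately through
     * persistRetrieveGroup/persistStoreGroup, so a size-limited memory cache
     * and an unlimited disk cache each keep mappings that match their own contents.
     */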
1541    /** OpenSymphony END (pretty long!) */
1542    /**
1543     * Returns the appropriate capacity (power of two) for the specified
1544     * initial capacity argument.
1545     */

1546    private int p2capacity(int initialCapacity) {
1547        int cap = initialCapacity;
1548
1549        // Compute the appropriate capacity
1550        int result;
1551
1552        if ((cap > MAXIMUM_CAPACITY) || (cap < 0)) {
1553            result = MAXIMUM_CAPACITY;
1554        } else {
1555            result = MINIMUM_CAPACITY;
1556
1557            while (result < cap) {
1558                result <<= 1;
1559            }
1560        }
1561
1562        return result;
1563    }
1564
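    /*
     * Illustrative examples (not part of the original source): p2capacity
     * rounds the requested capacity up to a power of two, clamped by
     * MINIMUM_CAPACITY and MAXIMUM_CAPACITY (defined earlier in this class).
     * Assuming MINIMUM_CAPACITY is a small power of two such as 4:
     *
     *     p2capacity(0)   -> MINIMUM_CAPACITY
     *     p2capacity(30)  -> 32
     *     p2capacity(100) -> 128
     *     p2capacity(-1)  -> MAXIMUM_CAPACITY   (negative is treated as overflow)
     */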
1565    /* Previous code
1566    public Object put(Object key, Object value)*/

1567    private Object put(Object key, Object value, boolean persist) {
1568        /** OpenSymphony END */
1569        if (value == null) {
1570            throw new NullPointerException();
1571        }
1572
1573        int hash = hash(key);
1574        Entry[] tab = table;
1575        int index = hash & (tab.length - 1);
1576        Entry first = tab[index];
1577        Entry e = first;
1578
1579        for (;;) {
1580            if (e == null) {
1581                synchronized (this) {
1582                    tab = table;
1583
1584                    /** OpenSymphony BEGIN */
1585
1586                    // Previous code
1587

1588                    /* if (first == tab[index]) {
1589                                                                    // Add to front of list
1590                                                                    Entry newEntry = new Entry(hash, key, value, first);
1591                                                                    tab[index] = newEntry;
1592                                                                    if (++count >= threshold) rehash();
1593                                                                    else recordModification(newEntry);
1594                                                                    return null; */

1595
1596                    // Remove an item if the cache is full
1597                    if (size() >= maxEntries) {
1598                        remove(removeItem(), false, false);
1599                    }
1600
1601                    if (first == tab[index]) {
1602                        // Add to front of list
1603                        Entry newEntry = null;
1604
1605                        if (memoryCaching) {
1606                            newEntry = new Entry(hash, key, value, first);
1607                        } else {
1608                            newEntry = new Entry(hash, key, NULL, first);
1609                        }
1610
1611                        tab[index] = newEntry;
1612                        itemPut(key);
1613
1614                        // Persist if required
1615                        if (persist && !overflowPersistence) {
1616                            persistStore(key, value);
1617                        }
1618
1619                        // If we have a CacheEntry, update the group lookups
1620                        if (value instanceof CacheEntry) {
1621                            updateGroups(null, (CacheEntry) value, persist);
1622                        }
1623
1624                        if (++count >= threshold) {
1625                            rehash();
1626                        } else {
1627                            recordModification(newEntry);
1628                        }
1629
1630                        return newEntry;
1631
1632                        /** OpenSymphony END */
1633                    } else {
1634                        // wrong list -- retry
1635

1636                        /** OpenSymphony BEGIN */
1637
1638                        /* Previous code
1639                        return sput(key, value, hash);*/

1640                        return sput(key, value, hash, persist);
1641
1642                        /** OpenSymphony END */
1643                    }
1644                }
1645            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
1646                // synch to avoid race with remove and to
1647                // ensure proper serialization of multiple replaces
1648                synchronized (this) {
1649                    tab = table;
1650
1651                    Object oldValue = e.value;
1652
1653                    // [CACHE-118] - get the old cache entry even if there's no memory cache
1654                    if (persist && (oldValue == NULL)) {
1655                        oldValue = persistRetrieve(key);
1656                    }
1657
1658                    if ((first == tab[index]) && (oldValue != null)) {
1659                        /** OpenSymphony BEGIN */
1660
1661                        /* Previous code
1662                        e.value = value;
1663                        return oldValue; */

1664                        if (memoryCaching) {
1665                            e.value = value;
1666                        }
1667
1668                        // Persist if required
1669                        if (persist && overflowPersistence) {
1670                            persistRemove(key);
1671                        } else if (persist) {
1672                            persistStore(key, value);
1673                        }
1674
1675                        updateGroups(oldValue, value, persist);
1676                        itemPut(key);
1677
1678                        return oldValue;
1679
1680                        /** OpenSymphony END */
1681                    } else {
1682                        // retry if wrong list or lost race against concurrent remove
1683

1684                        /** OpenSymphony BEGIN */
1685
1686                        /* Previous code
1687                        return sput(key, value, hash);*/

1688                        return sput(key, value, hash, persist);
1689
1690                        /** OpenSymphony END */
1691                    }
1692                }
1693            } else {
1694                e = e.next;
1695            }
1696        }
1697    }
1698
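    /*
     * Concurrency sketch for put() above (illustration only): the bucket list
     * is first traversed without locking; the actual write then synchronizes
     * and re-checks that the bucket head is unchanged before linking a new
     * entry, falling back to the fully synchronized sput() if it lost a race:
     *
     *     Entry first = tab[index];            // unsynchronized snapshot
     *     synchronized (this) {
     *         if (first == tab[index]) {
     *             tab[index] = new Entry(hash, key, value, first);
     *         } else {
     *             return sput(key, value, hash, persist);  // wrong list -- retry under lock
     *         }
     *     }
     */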
1699    private synchronized Object remove(Object key, boolean invokeAlgorithm, boolean forcePersist)
1700    /* Previous code
1701    public Object remove(Object key) */

1702
1703    /** OpenSymphony END */ {
1704        /*
1705          Strategy:
1706
1707          Find the entry, then
1708            1. Set value field to null, to force get() to retry
1709            2. Rebuild the list without this entry.
1710               All entries following removed node can stay in list, but
1711               all preceding ones need to be cloned. Traversals rely
1712               on this strategy to ensure that elements will not be
1713               repeated during iteration.
1714        */

1715
1716        /** OpenSymphony BEGIN */
1717        if (key == null) {
1718            return null;
1719        }
1720
1721        /** OpenSymphony END */
1722        int hash = hash(key);
1723        Entry[] tab = table;
1724        int index = hash & (tab.length - 1);
1725        Entry first = tab[index];
1726        Entry e = first;
1727
1728        for (;;) {
1729            if (e == null) {
1730                tab = getTableForReading();
1731
1732                if (first == tab[index]) {
1733                    return null;
1734                } else {
1735                    // Wrong list -- must restart traversal at new first
1736

1737                    /** OpenSymphony BEGIN */
1738
1739                    /* Previous Code
1740                    return sremove(key, hash); */

1741                    return sremove(key, hash, invokeAlgorithm);
1742
1743                    /** OpenSymphony END */
1744                }
1745            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
1746                synchronized (this) {
1747                    tab = table;
1748
1749                    Object oldValue = e.value;
1750                    if (persistenceListener != null && (oldValue == NULL)) {
1751                        oldValue = persistRetrieve(key);
1752                    }
1753
1754                    // re-find under synch if wrong list
1755                    if ((first != tab[index]) || (oldValue == null)) {
1756                        /** OpenSymphony BEGIN */
1757
1758                        /* Previous Code
1759                        return sremove(key, hash); */

1760                        return sremove(key, hash, invokeAlgorithm);
1761                    }
1762
1763                    /** OpenSymphony END */
1764                    e.value = null;
1765                    count--;
1766
1767                    /** OpenSymphony BEGIN */
1768                    if (forcePersist || (!unlimitedDiskCache && !overflowPersistence)) {
1769                        persistRemove(e.key);
1770                        // If we have a CacheEntry, update the group lookups
1771                        if (oldValue instanceof CacheEntry) {
1772                            CacheEntry oldEntry = (CacheEntry) oldValue;
1773                            removeGroupMappings(oldEntry.getKey(),
1774                                oldEntry.getGroups(), true);
1775                        }
1776                    } else {
1777                        // only remove from memory groups
1778                        if (oldValue instanceof CacheEntry) {
1779                            CacheEntry oldEntry = (CacheEntry) oldValue;
1780                            removeGroupMappings(oldEntry.getKey(),
1781                                oldEntry.getGroups(), false);
1782                        }
1783                    }
1784
1785                    if (!forcePersist && overflowPersistence && ((size() + 1) >= maxEntries)) {
1786                        persistStore(key, oldValue);
1787                        // add key to persistent groups but NOT to the memory groups
1788                        if (oldValue instanceof CacheEntry) {
1789                            CacheEntry oldEntry = (CacheEntry) oldValue;
1790                            addGroupMappings(oldEntry.getKey(), oldEntry.getGroups(), true, false);
1791                        }
1792                    }
1793
1794                    if (invokeAlgorithm) {
1795                        itemRemoved(key);
1796                    }
1797
1798                    /** OpenSymphony END */
1799                    Entry head = e.next;
1800
1801                    for (Entry p = first; p != e; p = p.next) {
1802                        head = new Entry(p.hash, p.key, p.value, head);
1803                    }
1804
1805                    tab[index] = head;
1806                    recordModification(head);
1807
1808                    return oldValue;
1809                }
1810            } else {
1811                e = e.next;
1812            }
1813        }
1814    }
1815
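    /*
     * Removal sketch (illustration only): entries after the removed node are
     * reused while entries before it are cloned, so readers traversing an old
     * chain never see an element repeated or skipped. Removing B from a
     * bucket A -> B -> C yields:
     *
     *     before:  tab[i] -> A  -> B -> C
     *     after:   tab[i] -> A' -> C          // A' is a copy of A; B.value is nulled
     *
     * A reader still holding the old chain continues to see A -> B -> C, but
     * B is detectably removed because its value field is null.
     */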
1816    /**
1817     * Remove this CacheEntry from the groups it no longer belongs to.
1818     * We have to treat the memory and disk group mappings separately so they remain
1819     * valid for their corresponding memory/disk caches. (e.g. if memory is limited
1820     * to 100 entries and disk is unlimited, the group mappings will be
1821     * different).
1822     *
1823     * @param key The cache key that we are removing from the groups.
1824     * @param oldGroups the set of groups we want to remove the cache entry
1825     * from.
1826     * @param persist A flag to indicate whether the keys should be removed
1827     * from the persistent cache layer.
1828     */

1829    private void removeGroupMappings(String key, Set oldGroups, boolean persist) {
1830        if (oldGroups == null) {
1831            return;
1832        }
1833
1834        for (Iterator it = oldGroups.iterator(); it.hasNext();) {
1835            String groupName = (String) it.next();
1836
1837            // Update the in-memory groups
1838            if (memoryCaching && (this.groups != null)) {
1839                Set memoryGroup = (Set) groups.get(groupName);
1840
1841                if (memoryGroup != null) {
1842                    memoryGroup.remove(key);
1843
1844                    if (memoryGroup.isEmpty()) {
1845                        groups.remove(groupName);
1846                    }
1847                }
1848            }
1849
1850            // Update the persistent group maps
1851            if (persist) {
1852                Set persistentGroup = persistRetrieveGroup(groupName);
1853
1854                if (persistentGroup != null) {
1855                    persistentGroup.remove(key);
1856
1857                    if (persistentGroup.isEmpty()) {
1858                        persistRemoveGroup(groupName);
1859                    } else {
1860                        persistStoreGroup(groupName, persistentGroup);
1861                    }
1862                }
1863            }
1864        }
1865    }
1866
1867    /**
1868     * Updates the groups to reflect the differences between the old and new
1869     * cache entries. Either of the old or new values can be <code>null</code>
1870     * or contain a <code>null</code> group list, in which case the entry's
1871     * groups will all be added or removed respectively.
1872     *
1873     * @param oldValue The old CacheEntry that is being replaced.
1874     * @param newValue The new CacheEntry that is being inserted.
1875     */

1876    private void updateGroups(Object oldValue, Object newValue, boolean persist) {
1877        // If we have/had a CacheEntry, update the group lookups
1878        boolean oldIsCE = oldValue instanceof CacheEntry;
1879        boolean newIsCE = newValue instanceof CacheEntry;
1880
1881        if (newIsCE && oldIsCE) {
1882            updateGroups((CacheEntry) oldValue, (CacheEntry) newValue, persist);
1883        } else if (newIsCE) {
1884            updateGroups(null, (CacheEntry) newValue, persist);
1885        } else if (oldIsCE) {
1886            updateGroups((CacheEntry) oldValue, null, persist);
1887        }
1888    }
1889
1890    /**
1891     * Updates the groups to reflect the differences between the old and new cache entries.
1892     * Either of the old or new values can be <code>null</code>
1893     * or contain a <code>null</code> group list, in which case the entry's
1894     * groups will all be added or removed respectively.
1895     *
1896     * @param oldValue The old CacheEntry that is being replaced.
1897     * @param newValue The new CacheEntry that is being inserted.
1898     */

1899    private void updateGroups(CacheEntry oldValue, CacheEntry newValue, boolean persist) {
1900        Set oldGroups = null;
1901        Set newGroups = null;
1902
1903        if (oldValue != null) {
1904            oldGroups = oldValue.getGroups();
1905        }
1906
1907        if (newValue != null) {
1908            newGroups = newValue.getGroups();
1909        }
1910
1911        // Get the names of the groups to remove
1912        if (oldGroups != null) {
1913            Set removeFromGroups = new HashSet();
1914
1915            for (Iterator it = oldGroups.iterator(); it.hasNext();) {
1916                String groupName = (String) it.next();
1917
1918                if ((newGroups == null) || !newGroups.contains(groupName)) {
1919                    // We need to remove this group
1920                    removeFromGroups.add(groupName);
1921                }
1922            }
1923
1924            removeGroupMappings(oldValue.getKey(), removeFromGroups, persist);
1925        }
1926
1927        // Get the names of the groups to add
1928        if (newGroups != null) {
1929            Set addToGroups = new HashSet();
1930
1931            for (Iterator it = newGroups.iterator(); it.hasNext();) {
1932                String groupName = (String) it.next();
1933
1934                if ((oldGroups == null) || !oldGroups.contains(groupName)) {
1935                    // We need to add this group
1936                    addToGroups.add(groupName);
1937                }
1938            }
1939
1940            addGroupMappings(newValue.getKey(), addToGroups, persist, true);
1941        }
1942    }
1943
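    /*
     * Diff sketch (illustration only): replacing an entry whose groups were
     * {"a", "b"} with a new entry whose groups are {"b", "c"} results in
     *
     *     removeGroupMappings(key, {"a"}, persist);
     *     addGroupMappings(key, {"c"}, persist, true);
     *
     * so group "b", which is present in both, is left untouched.
     */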
1944    /**
1945     * AbstractConcurrentReadCache collision list entry.
1946     */

1947    protected static class Entry implements Map.Entry {
1948        protected final Entry next;
1949        protected final Object key;
1950
1951        /*
1952           The use of volatile for value field ensures that
1953           we can detect status changes without synchronization.
1954           The other fields are never changed, and are
1955           marked as final.
1956        */

1957        protected final int hash;
1958        protected volatile Object value;
1959
1960        Entry(int hash, Object key, Object value, Entry next) {
1961            this.hash = hash;
1962            this.key = key;
1963            this.next = next;
1964            this.value = value;
1965        }
1966
1967        // Map.Entry Ops
1968        public Object getKey() {
1969            return key;
1970        }
1971
1972        /**
1973         * Set the value of this entry.
1974         * Note: In an entrySet or
1975         * entrySet.iterator, unless the set or iterator is used under
1976         * synchronization of the table as a whole (or you can otherwise
1977         * guarantee lack of concurrent modification), <tt>setValue</tt>
1978         * is not strictly guaranteed to actually replace the value field
1979         * obtained via the <tt>get</tt> operation of the underlying hash
1980         * table in multithreaded applications. If iterator-wide
1981         * synchronization is not used, and any other concurrent
1982         * <tt>put</tt> or <tt>remove</tt> operations occur, sometimes
1983         * even to <em>other</em> entries, then this change is not
1984         * guaranteed to be reflected in the hash table. (It might, or it
1985         * might not. There are no assurances either way.)
1986         *
1987         * @param value the new value.
1988         * @return the previous value, or null if entry has been detectably
1989         * removed.
1990         * @exception NullPointerException if the value is <code>null</code>.
1991         *
1992         **/

1993        public Object setValue(Object value) {
1994            if (value == null) {
1995                throw new NullPointerException();
1996            }
1997
1998            Object oldValue = this.value;
1999            this.value = value;
2000
2001            return oldValue;
2002        }
2003
2004        /**
2005         * Get the value.
2006         * Note: In an entrySet or entrySet.iterator,
2007         * unless the set or iterator is used under synchronization of the
2008         * table as a whole (or you can otherwise guarantee lack of
2009         * concurrent modification), <tt>getValue</tt> <em>might</em>
2010         * return null, reflecting the fact that the entry has been
2011         * concurrently removed. However, there are no assurances that
2012         * concurrent removals will be reflected using this method.
2013         *
2014         * @return the current value, or null if the entry has been
2015         * detectably removed.
2016         **/

2017        public Object getValue() {
2018            return value;
2019        }
2020
2021        public boolean equals(Object o) {
2022            if (!(o instanceof Map.Entry)) {
2023                return false;
2024            }
2025
2026            Map.Entry e = (Map.Entry) o;
2027
2028            if (!key.equals(e.getKey())) {
2029                return false;
2030            }
2031
2032            Object v = value;
2033
2034            return (v == null) ? (e.getValue() == null) : v.equals(e.getValue());
2035        }
2036
2037        public int hashCode() {
2038            Object v = value;
2039
2040            return hash ^ ((v == null) ? 0 : v.hashCode());
2041        }
2042
2043        public String toString() {
2044            return key + "=" + value;
2045        }
2046
2047        protected Object clone() {
2048            return new Entry(hash, key, value, ((next == null) ? null : (Entry) next.clone()));
2049        }
2050    }
2051
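    /*
     * Sketch (illustration only): because Entry.value is volatile, readers can
     * detect a concurrent removal without locking. remove() nulls the value
     * field, and callers of getValue() treat that null as "entry gone":
     *
     *     Object v = entry.getValue();
     *     if (v == null) {
     *         // entry was detectably removed; fall back to a synchronized lookup
     *     }
     */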
2052    protected class HashIterator implements Iterator, Enumeration {
2053        protected final Entry[] tab; // snapshot of table
2054        protected Entry entry = null; // current node of slot
2055        protected Entry lastReturned = null; // last node returned by next
2056        protected Object currentKey; // key for current node
2057        protected Object currentValue; // value for current node
2058        protected int index; // current slot
2059

2060        protected HashIterator() {
2061            tab = AbstractConcurrentReadCache.this.getTableForReading();
2062            index = tab.length - 1;
2063        }
2064
2065        public boolean hasMoreElements() {
2066            return hasNext();
2067        }
2068
2069        public boolean hasNext() {
2070            /*
2071              currentKey and currentValue are set here to ensure that next()
2072              returns normally if hasNext() returns true. This avoids
2073              surprises especially when final element is removed during
2074              traversal -- instead, we just ignore the removal during
2075              current traversal.
2076            */

2077            for (;;) {
2078                if (entry != null) {
2079                    Object v = entry.value;
2080
2081                    if (v != null) {
2082                        currentKey = entry.key;
2083                        currentValue = v;
2084
2085                        return true;
2086                    } else {
2087                        entry = entry.next;
2088                    }
2089                }
2090
2091                while ((entry == null) && (index >= 0)) {
2092                    entry = tab[index--];
2093                }
2094
2095                if (entry == null) {
2096                    currentKey = currentValue = null;
2097
2098                    return false;
2099                }
2100            }
2101        }
2102
2103        public Object next() {
2104            if ((currentKey == null) && !hasNext()) {
2105                throw new NoSuchElementException();
2106            }
2107
2108            Object result = returnValueOfNext();
2109            lastReturned = entry;
2110            currentKey = currentValue = null;
2111            entry = entry.next;
2112
2113            return result;
2114        }
2115
2116        public Object nextElement() {
2117            return next();
2118        }
2119
2120        public void remove() {
2121            if (lastReturned == null) {
2122                throw new IllegalStateException();
2123            }
2124
2125            AbstractConcurrentReadCache.this.remove(lastReturned.key);
2126        }
2127
2128        protected Object returnValueOfNext() {
2129            return entry;
2130        }
2131    }
2132
2133    protected class KeyIterator extends HashIterator {
2134        protected Object returnValueOfNext() {
2135            return currentKey;
2136        }
2137    }
2138
2139    protected class ValueIterator extends HashIterator {
2140        protected Object returnValueOfNext() {
2141            return currentValue;
2142        }
2143    }
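    /*
     * Iteration sketch (illustration only): the iterators above work on a
     * snapshot of the bucket array and skip entries whose value has been
     * nulled by a concurrent remove, so they are weakly consistent rather
     * than fail-fast:
     *
     *     for (Iterator it = cache.keySet().iterator(); it.hasNext();) {
     *         Object key = it.next();  // never throws ConcurrentModificationException
     *     }
     *
     * (keySet() is assumed to be provided elsewhere in this class, per the
     * java.util.Map contract; "cache" is a hypothetical concrete instance.)
     */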
2144}
2145