/**
 * Sequoia: Database clustering technology.
 * Copyright (C) 2002-2004 French National Institute For Research In Computer
 * Science And Control (INRIA).
 * Contact: sequoia@continuent.org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Initial developer(s): Emmanuel Cecchet.
 * Contributor(s): ______________________________________.
 */

package org.continuent.sequoia.controller.cache.metadata;

import java.util.Hashtable;

import org.continuent.sequoia.common.i18n.Translate;
import org.continuent.sequoia.common.log.Trace;
import org.continuent.sequoia.common.protocol.Field;
import org.continuent.sequoia.common.xml.DatabasesXmlTags;
import org.continuent.sequoia.controller.requests.AbstractRequest;

/**
 * This class implements a ResultSet metadata cache.
 * <p>
 * ResultSet Fields are kept here to avoid recomputing and reallocating them
 * each time a query is executed.
 *
 * @author <a HREF="mailto:Emmanuel.Cecchet@inria.fr">Emmanuel Cecchet</a>
 * @version 1.0
 */

public class MetadataCache
{
  private static Trace logger = Trace.getLogger(MetadataCache.class.getName());

  // SQL -> Field[]
  private Hashtable metadataCache;

  // Schema.Table.Column name -> Field
  private Hashtable fieldCache;

  private int maxNbOfMetadata;
  private int maxNbOfField;

  /**
   * Constructor for MetadataCache.
   *
   * @param maxNbOfMetadata maximum number of entries in the metadata cache
   * @param maxNbOfField maximum number of entries in the field cache
   */
  public MetadataCache(int maxNbOfMetadata, int maxNbOfField)
  {
    metadataCache = new Hashtable(maxNbOfMetadata == 0
        ? 10000
        : maxNbOfMetadata);
    fieldCache = new Hashtable(maxNbOfField == 0 ? 100 : maxNbOfField);
    if (maxNbOfMetadata < 0)
      throw new RuntimeException(Translate.get("cache.metadata.invalid.size",
          maxNbOfMetadata));
    if (maxNbOfMetadata == 0)
      this.maxNbOfMetadata = Integer.MAX_VALUE;
    else
      this.maxNbOfMetadata = maxNbOfMetadata;
    if (maxNbOfField < 0)
      throw new RuntimeException(Translate.get("cache.metadata.invalid.size",
          maxNbOfField));
    if (maxNbOfField == 0)
      this.maxNbOfField = Integer.MAX_VALUE;
    else
      this.maxNbOfField = maxNbOfField;
  }

  /**
   * Flush the cache.
   */
  public void flushCache()
  {
    synchronized (metadataCache)
    {
      metadataCache.clear();
    }
    synchronized (fieldCache)
    {
      fieldCache.clear();
    }
  }

  /**
   * Get the metadata associated with a request.
   * <p>
   * Returns null if the cache contains no metadata for the given request.
   *
   * @param request the request we look for
   * @return the metadata or null if not in cache
   */
  public Field[] getMetadata(AbstractRequest request)
  {
    return (Field[]) metadataCache.get(request.getUniqueKey());
  }

  /**
   * Add a metadata entry to the cache and associate it with the given request.
   *
   * @param request request to which the metadata belongs
   * @param metadata metadata to cache
   */
  public void addMetadata(AbstractRequest request, Field[] metadata)
  {
    // Note that the underlying cache Hashtable is synchronized and we usually
    // do not need to synchronize on it.
    // As we will have to add a cache entry, check that the cache size is
    // within bounds, else remove the first entry of the hashtable.
    while (metadataCache.size() > maxNbOfMetadata)
    { // Remove first entry from Hashtable. We need to synchronize here to be
      // sure that we are not trying to concurrently remove the first cache
      // entry.
      synchronized (metadataCache)
      {
        try
        {
          metadataCache.remove(metadataCache.keys().nextElement());
        }
        catch (Exception ignore)
        {
          break;
        }
      }
    }

    // Add to cache
    try
    {
      metadataCache.put(request.getUniqueKey(), metadata);
    }
    catch (OutOfMemoryError oome)
    {
      flushCache();
      System.gc();
      logger.warn(Translate.get("cache.memory.error.cache.flushed", this
          .getClass()));
    }
  }

  /**
   * Get the field corresponding to a column name.
   * <p>
   * Returns null if the cache contains no field for the given name.
   *
   * @param fullyQualifiedFieldName the field name (table.column.label) to look
   *          for
   * @return the corresponding Field or null if not in cache
   */
  public Field getField(String fullyQualifiedFieldName)
  {
    return (Field) fieldCache.get(fullyQualifiedFieldName);
  }

  /**
   * Add a Field entry to the cache and associate it with the given name.
   *
   * @param fullyQualifiedFieldName table.column.label name that uniquely
   *          identifies the field
   * @param field field to cache
   */
  public void addField(String fullyQualifiedFieldName, Field field)
  {
    // Note that the underlying cache Hashtable is synchronized and we usually
    // do not need to synchronize on it.
    // As we will have to add a cache entry, check that the cache size is
    // within bounds, else remove the first entry of the hashtable.
    while (fieldCache.size() > maxNbOfField)
    { // Remove first entry from Hashtable. We need to synchronize here to be
      // sure that we are not trying to concurrently remove the first cache
      // entry.
      synchronized (fieldCache)
      {
        try
        {
          fieldCache.remove(fieldCache.keys().nextElement());
        }
        catch (Exception ignore)
        {
          break;
        }
      }
    }

    // Add to cache
    try
    {
      fieldCache.put(fullyQualifiedFieldName, field);
    }
    catch (OutOfMemoryError oome)
    {
      flushCache();
      System.gc();
      logger.warn(Translate.get("cache.memory.error.cache.flushed", this
          .getClass()));
    }
  }

  /**
   * Get xml information about this MetadataCache.
   *
   * @return <code>String</code> in xml formatted text
   */
  public String getXml()
  {
    return "<" + DatabasesXmlTags.ELT_MetadataCache + " "
        + DatabasesXmlTags.ATT_maxNbOfMetadata + "=\"" + maxNbOfMetadata
        + "\" " + DatabasesXmlTags.ATT_maxNbOfField + "=\""
        + (maxNbOfField == Integer.MAX_VALUE ? 0 : maxNbOfField) + "\"/>";
  }
}
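
For context, here is a minimal caller-side sketch of how this cache is meant to be consulted: look the metadata up by request and only compute and insert it on a miss. The surrounding class name and computeFields() are hypothetical placeholders for the controller code that actually builds the Field[], not Sequoia APIs; only the MetadataCache calls are taken from the class above.

import org.continuent.sequoia.common.protocol.Field;
import org.continuent.sequoia.controller.cache.metadata.MetadataCache;
import org.continuent.sequoia.controller.requests.AbstractRequest;

// Hypothetical usage sketch (not Sequoia code): consult the cache before
// recomputing ResultSet metadata for a request.
public abstract class MetadataCacheUsageSketch
{
  // Passing 0 means "no limit"; the cache then uses Integer.MAX_VALUE internally.
  private final MetadataCache cache = new MetadataCache(10000, 0);

  public Field[] metadataFor(AbstractRequest request)
  {
    Field[] metadata = cache.getMetadata(request); // keyed on request.getUniqueKey()
    if (metadata == null)
    {
      metadata = computeFields(request);    // expensive path: build the Field[] once
      cache.addMetadata(request, metadata); // may evict an arbitrary entry if the cache is over its limit
    }
    return metadata;
  }

  // Placeholder for the real metadata computation (assumption, not a Sequoia API).
  protected abstract Field[] computeFields(AbstractRequest request);
}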