org.netbeans.mdr.persistence.btreeimpl.btreestorage.FileCache


/*
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License (the License). You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
 * or http://www.netbeans.org/cddl.txt.
 *
 * When distributing Covered Code, include this CDDL Header Notice in each file
 * and include the License file at http://www.netbeans.org/cddl.txt.
 * If applicable, add the following below the CDDL Header, with the fields
 * enclosed by brackets [] replaced by your own identifying information:
 * "Portions Copyrighted [year] [name of copyright owner]"
 *
 * The Original Software is NetBeans. The Initial Developer of the Original
 * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun
 * Microsystems, Inc. All Rights Reserved.
 */

package org.netbeans.mdr.persistence.btreeimpl.btreestorage;

import java.io.*;
import java.util.*;

import org.netbeans.mdr.persistence.*;
import org.netbeans.mdr.util.Logger;

/**
* FileCache provides transactional cached access to a set of files.
* Changes to these files are accumulated both in memory and on disk until
* a commit is requested, at which time they are flushed to disk. If the
* program exits for any reason without committing, any changes which were
* written to disk are rolled back to the last commit point the next time
* the FileCache is opened.
*/

public class FileCache {

    private String[] fileNames;
    /* sizes of files contained in the cache */
    private int fileSize[];

    /* correct header for files in the cache */
    private FileHeader header;

    /* our log file */
    private LogFile log;

    /* true if cache has uncommitted changes */
    private boolean inXact;

    /* size of cached pages */
    private static int pageSize;

    /* all pages */
    private static ArrayList pages;

    /* pages hashed by their ID */
    private static HashMap pageHash;

    /* pages not currently pinned */
    private static IntrusiveList freePages;

    private static HashSet/*<FileCache>*/ instances = new HashSet();

    /* dirty pages which cannot be written until the log is flushed */
    private HashMap heldForLog;

    /* time stamp for current transaction */
    private long newTimeStamp;

    /* caching stats */
    private static int hits = 0;
    private static int misses = 0;
    private static int extensions = 0;
    private static int pagesFlushed = 0;

    private int logFlushes = 0;

    /* for regression testing */
    private static int flushFailure = -1; /* fail after this many flushes */
    private static int commitFailure = -1; /* fail after this many commits */

    /* A list of objects to notify before committing */
    private ArrayList toNotify;

    private static final int MAX_FILES = 200;
    private static final Map OPEN_FILES_CACHE = new LinkedHashMap(MAX_FILES * 2, .5f, true) {
        protected boolean removeEldestEntry(Map.Entry eldest) {
            if (size() > MAX_FILES) {
                RandomAccessFile file = (RandomAccessFile) eldest.getValue();
                try {
                    file.close();
                } catch (IOException e) {
                    Logger.getDefault().notify(e);
                }
                return true;
            }
            return false;
        }
    };
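
OPEN_FILES_CACHE above is the textbook LRU idiom: the third constructor argument (true) makes the LinkedHashMap iterate in access order, and overriding removeEldestEntry lets the map evict, and here close, the least recently used RandomAccessFile once more than MAX_FILES handles are open. A minimal, self-contained sketch of the same pattern, with illustrative names and capacity:

import java.util.LinkedHashMap;
import java.util.Map;

public class LruDemo {
    public static void main(String[] args) {
        final int capacity = 3; // illustrative capacity
        Map lru = new LinkedHashMap(capacity * 2, .5f, true) {
            protected boolean removeEldestEntry(Map.Entry eldest) {
                return size() > capacity; // returning true evicts the eldest entry
            }
        };
        lru.put("a", "1"); lru.put("b", "2"); lru.put("c", "3");
        lru.get("a");       // touch "a" so "b" becomes the eldest
        lru.put("d", "4");  // evicts "b"
        System.out.println(lru.keySet()); // prints [c, a, d]
    }
}
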
    static void closeFile(String fileName) throws IOException {
        RandomAccessFile file;
        synchronized (FileCache.OPEN_FILES_CACHE) {
            file = (RandomAccessFile) FileCache.OPEN_FILES_CACHE.remove(fileName);
        }
        if (file != null) file.close();
    }

    static RandomAccessFile getFile(String fileName) throws IOException {
        RandomAccessFile result;
        synchronized (FileCache.OPEN_FILES_CACHE) {
            result = (RandomAccessFile) FileCache.OPEN_FILES_CACHE.get(fileName);
            if (result == null) {
                result = new RandomAccessFile(fileName, "rw");
                FileCache.OPEN_FILES_CACHE.put(fileName, result);
            }
        }
        return result;
    }

    static {
        pages = new ArrayList(BtreeDatabase.FILE_CACHE_SIZE);
        pageHash = new HashMap();
        freePages = new IntrusiveList();
        pageSize = BtreeDatabase.PAGE_SIZE;
        addPages(BtreeDatabase.FILE_CACHE_SIZE);
    }
    static int checkForForcedFailure(String property, int count) {
        if (count == -1) {
            /* see if we're supposed to fail after N operations */
            Integer failCount = Integer.getInteger(property);
            count = (failCount == null) ? 0 : failCount.intValue();
        }

        if (count > 0 && --count == 0) {
            System.exit(1);
        }

        //Logger.getDefault().log(property + ": : count is " + count);

        return count;
    }
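
The counter above comes from Integer.getInteger, which is easy to misread: it does not parse its argument as a number, it looks up the named system property and parses that, returning null when the property is unset. So running the JVM with, say, -Dorg.netbeans.mdr.persistence.btreeimpl.btreestorage.FileCache.commitFailure=5 forces a hard exit on the fifth commit, which is what the regression hooks rely on. A minimal sketch (property names are illustrative):

public class GetIntegerDemo {
    public static void main(String[] args) {
        System.setProperty("demo.failCount", "3");
        Integer set = Integer.getInteger("demo.failCount");    // -> 3
        Integer unset = Integer.getInteger("demo.noSuchKey");  // -> null (property unset)
        System.out.println(set + " / " + unset);               // prints 3 / null
    }
}
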
    /* extend the cache */
    private static void addPages(int numToAdd) {
        for (int i = 0; i < numToAdd; i++) {
            CachedPage page = new CachedPage(pageSize);
            pages.add(page);
            freePages.addLast(page);
        }
        // Logger.getDefault().log("Cache is now " + pages.size() + " pages");
    }
    /** A callback used by the logging system to indicate that a modified
    * page cannot be written until the log file is flushed.
    * @param page the page which is being held.
    */
    void holdForLog(CachedPage page) {
        page.heldForLog = true;
        heldForLog.put(page.key, page);
    }

    /** A callback used by the logging system to indicate that the
    * log file was flushed.
    */
    void logWasFlushed() {
        Iterator itr = heldForLog.values().iterator();
        while (itr.hasNext()) {
            CachedPage page = (CachedPage)itr.next();
            page.heldForLog = false;
            if (page.getPinCount() == 0)
                freePages.addFirst(page);
        }
        heldForLog.clear();
        // Logger.getDefault().log("Log file flushed");
    }
    /** Create the cache and open the files. The files are assumed
    * already to exist and have valid, identical file headers.
    * @param fileNames the files to access via the cache
    * @param baseName the base name used to derive the log file name
    * @exception StorageException I/O error opening or reading the files
    * @exception BadParameterException if the files do not have identical file
    * headers, or the log file exists but is not consistent with the files
    * @exception ConsistencyException if the log file exists and is corrupted
    */
    public FileCache(String fileNames[], String baseName)
            throws StorageException {

        this.fileNames = new String[fileNames.length];
        boolean failure = true;
        try {
            try {
                RandomAccessFile[] files = new RandomAccessFile[fileNames.length];
                fileSize = new int[fileNames.length];

                FileHeader tmpHeader;

                for (int i = 0; i < fileNames.length; i++) {
                    files[i] = getFile(fileNames[i]);
                    this.fileNames[i] = fileNames[i];
                }
                tmpHeader = new FileHeader(files[0]);

                log = new LogFile(
                    this, baseName, BtreeDatabase.PAGE_SIZE, fileNames.length, tmpHeader.fileId);

                for (int i = 0; i < fileNames.length; i++) {
                    fileSize[i] = (int)files[i].length();
                    FileHeader hdr = new FileHeader(files[i]);

                    // Note: we can't check that headers are consistent until
                    // after recovery, since previous to that, files may have
                    // inconsistent timestamps
                    if (i == 0) {
                        header = hdr;
                    }
                    else if (!hdr.equals(header)) {
                        throw new StoragePersistentDataException(
                                        "Files are not consistent");
                    }
                }

                heldForLog = new HashMap();
                failure = false;
                instances.add(this);
            }
            finally {
                if (failure) {
                    for (int i = 0; i < this.fileNames.length; i++) {
                        if (this.fileNames[i] != null) {
                            closeFile(this.fileNames[i]);
                        }
                    }
                    if (log != null)
                        log.close();
                }
            }
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }
    }
    /** return the array of open files
    */
    RandomAccessFile[] getFiles() throws IOException {
        return getFiles(fileNames);
    }

    static RandomAccessFile[] getFiles(String[] fileNames) throws IOException {
        RandomAccessFile[] files = new RandomAccessFile[fileNames.length];
        for (int i = 0; i < fileNames.length; i++) {
            files[i] = FileCache.getFile(fileNames[i]);
        }
        return files;
    }
    /** close all files without committing
    * @exception StorageException I/O error closing the files
    */
    public synchronized void abort() throws StorageException {
        closeFiles();
    }

    /** commit all changes and close all cached files
    * @exception StorageException I/O error closing the files
    */
    public synchronized void close() throws StorageException {
        commit();

        // reInit or remove all pages used by this FileCache
        Iterator itr = pages.iterator();
        while (itr.hasNext()) {
            CachedPage page = (CachedPage)itr.next();
            if (page.getOwner() == this) {
                if (pages.size() > BtreeDatabase.FILE_CACHE_SIZE) {
                    itr.remove();
                    freePages.remove(page);
                } else {
                    freePages.addLast(page);
                    page.reInit(null, null);
                }
            }
        }
        for (Iterator it = pageHash.keySet().iterator(); it.hasNext();) {
            HashKey entry = (HashKey) it.next();
            if (entry.owner == this) {
                it.remove();
            }
        }

        closeFiles();
    }

    /* close all files */
    private void closeFiles() throws StorageException {
        try {
            for (int i = 0; i < fileNames.length; i++)
                closeFile(fileNames[i]);
            log.close();
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        } finally {
            instances.remove(this);
        }
    }
    /** commit all changes
    * @exception StorageException I/O error writing the files
    */
    public synchronized void commit() throws StorageException {
        commitFailure = checkForForcedFailure(
            "org.netbeans.mdr.persistence.btreeimpl.btreestorage.FileCache.commitFailure",
            commitFailure);

        if (toNotify != null) {
            Iterator itr = toNotify.iterator();
            while (itr.hasNext()) {
                NotifyOnCommit obj = (NotifyOnCommit)itr.next();
                obj.prepareToCommit();
            }
        }

        if (inXact) {

            /* update timestamps */
            for (int i = 0; i < fileNames.length; i++) {
                CachedPage first = getPage(i, 0);
                setWritable(first);
                FileHeader.updateTime(first, newTimeStamp);
                first.unpin();
            }
            log.flush();

            // Flush all cache buffers.
            Iterator itr = pages.iterator();
            while (itr.hasNext()) {
                CachedPage page = (CachedPage)itr.next();
                if (page.isDirty && page.getOwner() == this)
                    flushOne(page);
            }
            log.commit();
            header.timeStamp = newTimeStamp;
            inXact = false;
        }
    }
    /* write a dirty page to the disk */
    private static void flushOne(CachedPage page) throws StorageException {
        try {
            flushFailure = checkForForcedFailure(
                "org.netbeans.mdr.persistence.btreeimpl.btreestorage.FileCache.flushFailure",
                flushFailure);
            FileCache owner = page.getOwner();
            assert owner != null;

            if (!instances.contains(owner)) return; // obsolete page from an old cache that was deactivated

            RandomAccessFile file = getFile(owner.fileNames[page.key.fileIndex]);
            file.seek(page.key.offset);
            file.write(page.contents);
            page.isDirty = false;
            pagesFlushed++;
            if (page.key.offset >= owner.fileSize[page.key.fileIndex]) {
                owner.fileSize[page.key.fileIndex] = page.key.offset + pageSize;
            }
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }
    }
    /** unpin a set of pages. Until unpinned the same number of times
    * that they have been pinned, pages cannot be released from the cache.
    * @param pages the pages to unpin
    * @exception BadParameterException if any of the pages are not pinned
    */
    public synchronized void unpin(CachedPage pages[])
        throws StorageException {
        for (int i = 0; i < pages.length; i++)
            unpin(pages[i]);
    }

    /** unpin a page. Until unpinned the same number of times it has
    * been pinned, a page cannot be released from the cache.
    * @param page the page to unpin
    * @exception BadParameterException if the page is not pinned
    */
    public synchronized void unpin(CachedPage page)
                        throws StorageException {
        if (page.getPinCount() <= 0) {
            throw new StorageTransientDataException(
                        "Attempt to unpin page which is not pinned");
        }

        if ((page.innerUnpin() == 0) && !page.heldForLog) {
            freePages.addFirst(page);
        }
    }
    /** Get the pages which contain the desired bytes from the file.
    * This implicitly pins these pages. Note that these pages may extend
    * past the current EOF; that is, this routine may extend the file.
    * @param fileidx the index of the file containing the pages.
    * @param first the number of the first page to get
    * @param size the number of pages to get
    * @return the array of pages requested.
    * @exception StorageException I/O error reading the pages
    */
    public synchronized CachedPage[] getPages(int fileidx, int first, int size)
        throws StorageException {

        CachedPage retval[] = new CachedPage[size];
        for (int i = 0 ; i < size; i++) {
            retval[i] =
                getPage(this, new PageID(fileidx, pageSize * (first + i)), false);
        }

        return retval;
    }
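
Every page handed out by getPage/getPages comes back pinned, and a pin is a promise to call unpin later; a page whose pin count never reaches zero can never be recycled or evicted. A sketch of that discipline (file names are hypothetical, and the caller is assumed to live in this package, as real callers such as BtreeDatabase do, so that page contents are reachable):

static void readRun() throws StorageException {
    String[] names = { "/tmp/db.btd", "/tmp/db.btx" };  // hypothetical files
    FileCache cache = new FileCache(names, "/tmp/db");
    CachedPage[] run = cache.getPages(0, 1, 4);         // pins pages 1..4 of file 0
    try {
        for (int i = 0; i < run.length; i++) {
            byte first = run[i].contents[0];            // read-only access, no logging needed
        }
    } finally {
        cache.unpin(run);                               // release every pin, even on error
    }
}
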
    /** Get the single page at the desired offset into the file.
    * This implicitly pins that page. Note that the page may exist
    * past the current EOF; that is, this routine may extend the file.
    * @param fileidx the index of the file containing the page.
    * @param pageNum the page number to get
    * @return the page requested
    * @exception StorageException I/O error reading the page
    */
    public synchronized CachedPage getPage(int fileidx, int pageNum)
                                                    throws StorageException {
        return getPage(this, new PageID(fileidx, pageNum * pageSize), false);
    }

    /** Get the single page at the desired offset into the file.
    * This implicitly pins that page. Note that the page may exist
    * past the current EOF; that is, this routine may extend the file.
    * @param page the PageID describing the desired page
    * @return the page requested
    * @exception StorageException I/O error reading the page
    */
    synchronized CachedPage getPage(PageID page) throws StorageException {
        return getPage(this, page, false);
    }
    private static class HashKey {
        public final FileCache owner;
        public final PageID id;

        public HashKey(FileCache owner, PageID id) {
            this.owner = owner;
            this.id = id;
        }

        public boolean equals(Object o) {
            return o == this || ((o instanceof HashKey) && (((HashKey) o).owner == owner) && (((HashKey) o).id.equals(id)));
        }

        public int hashCode() {
            return owner.hashCode() + 31 * id.hashCode();
        }
    }
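
HashKey is the standard composite-key pattern for a HashMap: immutable fields, an equals that compares both components (the owner by identity, the id by value), and a hashCode that mixes the two with a small prime so distinct (owner, id) pairs spread across buckets. The same shape in miniature, with illustrative names:

import java.util.HashMap;
import java.util.Map;

public class CompositeKeyDemo {
    static final class Key {
        final Object owner;  // compared by identity, as in HashKey
        final int id;
        Key(Object owner, int id) { this.owner = owner; this.id = id; }
        public boolean equals(Object o) {
            return o == this ||
                (o instanceof Key && ((Key) o).owner == owner && ((Key) o).id == id);
        }
        public int hashCode() { return System.identityHashCode(owner) + 31 * id; }
    }

    public static void main(String[] args) {
        Object cacheA = new Object();
        Map map = new HashMap();
        map.put(new Key(cacheA, 42), "page-42");
        // a distinct but equal key (same owner, same id) finds the entry
        System.out.println(map.get(new Key(cacheA, 42))); // prints page-42
    }
}
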
    /** Get the single page at the desired offset into the file.
    * This implicitly pins that page. Note that the page may exist
    * past the current EOF; that is, this routine may extend the file.
    * @param instance the cache requesting the page
    * @param id the PageID describing the desired page
    * @param fromCacheOnly if true, only look inside the cache
    * @return the page requested, or null if fromCacheOnly is true and the
    * page is not cached
    * @exception StorageException I/O error reading the page
    */
    private static CachedPage getPage(FileCache instance, PageID id, boolean fromCacheOnly)
            throws StorageException {
        HashKey key = new HashKey(instance, id);
        CachedPage page = (CachedPage)pageHash.get(key);
        if (page != null)
        {
            if (page.pin(instance) == 0 && !page.heldForLog)
                freePages.remove(page);
            hits++;
            return page;
        }
        else if (fromCacheOnly) {
            return null;
        }

        // Find a free page
        CachedPage free = (CachedPage)freePages.removeLast();
        if (free == null) {
            /* if there are any waiting for the log to be flushed, flush it */
            for (Iterator it = instances.iterator(); it.hasNext();) {
                FileCache cache = (FileCache) it.next();
                if (!cache.heldForLog.isEmpty())
                {
                    cache.log.flush();
                    cache.logFlushes++;
                }
            }
            free = (CachedPage)freePages.removeLast();
        }

        if (free == null) {
            // cache is full -- make it half as big again
            int increment = (pages.size() + 1) / 2;
            addPages(increment);
            extensions++;
            free = (CachedPage)freePages.removeLast();
        }

        if (free.isDirty) {
            flushOne(free);
        }

        if (free.key != null && free.getOwner() != null) {
            pageHash.remove(new HashKey(free.getOwner(), free.key));
        }

        free.reInit(instance, id);
        pageHash.put(key, free);
        if (id.offset >= instance.fileSize[id.fileIndex]) {
            Arrays.fill(free.contents, (byte)0);
        }
        else {
            try {
                RandomAccessFile file = getFile(instance.fileNames[id.fileIndex]);
                file.seek(id.offset);
                file.readFully(free.contents);
            }
            catch (IOException ex) {
                throw new StorageIOException(ex);
            }
        }

        free.pin(instance);
        misses++;
        return free;
    }
    /** Make the specified page writable. If it was not writable previously,
    * this causes it to be logged. This must be called before the page
    * is modified. If the cache is not currently in a transaction,
    * this implicitly begins one.
    * @param page The page to be made writable.
    * @exception StorageException I/O error logging the page
    */
    public synchronized void setWritable(CachedPage page)
        throws StorageException {
        if (page.isDirty)
            return;

        if (!inXact) {
            newTimeStamp = System.currentTimeMillis();
            log.begin(fileNames, header.timeStamp, newTimeStamp);
            inXact = true;
        }
        log.addPageToLog(page);
        page.isDirty = true;
    }

    /** Make the specified pages writable. If any were not writable previously,
    * this causes them to be logged. This must be called before the pages are
    * modified.
    * @param pages The pages to be made writable.
    * @exception StorageException I/O error logging the pages
    */
    public synchronized void setWritable(CachedPage pages[])
        throws StorageException {
        for (int i = 0; i < pages.length; i++)
            setWritable(pages[i]);
    }
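
setWritable is the write half of the cache's protocol: the first writable page in a quiescent cache implicitly begins a transaction and logs the page before it is touched, so a crash before commit can be rolled back. The expected calling sequence, sketched with hypothetical values (again assuming a caller inside this package):

static void updateOnePage(FileCache cache) throws StorageException {
    CachedPage page = cache.getPage(0, 7);  // pin page 7 of file 0
    try {
        cache.setWritable(page);            // logs the page; begins a transaction if needed
        page.contents[0] = (byte) 1;        // only now is modification safe
    } finally {
        cache.unpin(page);
    }
    cache.commit();                         // flush the log, then the dirty pages
}

Note the ordering: setWritable must precede the first modification, because that call is what captures the page's state in the log.
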
    /**
    * for debugging
    */
    public void dumpCache(PrintStream strm) {
        strm.println("Cached files:");
        for (int i = 0; i < fileNames.length; i++) {
            strm.println(
                Integer.toString(i) + ": " + fileNames[i] +
                " size: " + fileSize[i]);
        }
        strm.println("");

        strm.println(Integer.toString(pages.size()) + " pages");
        Iterator itr = pages.iterator();
        int num = 0;
        while (itr.hasNext()) {
            strm.println(Integer.toString(num++) + ":");
            strm.print(((CachedPage)itr.next()).toString());
        }
    }
    /**
    * Show caching statistics
    */
    public void showStats(PrintStream strm) {
        showStats(new PrintWriter(strm));
    }

    /**
    * Show caching statistics
    */
    public void showStats(PrintWriter strm) {
        int pinned = 0;
        int dirty = 0;
        int held = 0;
        for (int i = 0; i < pages.size(); i++) {
            CachedPage pg = (CachedPage)pages.get(i);
            if (pg.getPinCount() > 0) {
                pinned++;
                // strm.println("Pinned page " + pg.key + " count: " + pg.pinCount);
            }
            if (pg.isDirty)
                dirty++;
            if (pg.heldForLog)
                held++;
        }
        strm.println("Page counts: total = " + pages.size() + " pinned = " +
            pinned + " dirty = " + dirty + " held = " + held);
        strm.println(
            "Cache hits: " + hits + " misses: " + misses +
            " hit rate: " + 100. * (float)hits / (float)(hits + misses));
        strm.println(pagesFlushed + " pages written");
        strm.println("Log file flushed to free pages " + logFlushes + " times");
        strm.println("Cache made bigger " + extensions + " times");
        strm.flush();
    }
    /**
    * get page size
    */
    int getPageSize() {
        return pageSize;
    }

    /**
    * Add to the list of objects to be notified before commit
    * @param notified the object to add to the list
    */
    public synchronized void addNotifier(NotifyOnCommit notified) {
        if (toNotify == null) {
            toNotify = new ArrayList();
        }
        toNotify.add(notified);
    }

    /**
    * An object which needs to be notified before the cache commits (for
    * instance, to write any changes to the cache before the cache is
    * flushed to disk) implements this interface, and calls addNotifier
    * on the cache.
    */
    public interface NotifyOnCommit {

        /** a callback method called before the cache commits. If
        * the result is an exception, the commit does not take place.
        */
        void prepareToCommit() throws StorageException;
    }

}
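
A NotifyOnCommit implementation is how higher layers that buffer changes in memory get a final chance to push those changes into cache pages before commit flushes the log and the files. An end-to-end sketch with a hypothetical notifier (page numbers and file names are illustrative):

class PendingWrites implements FileCache.NotifyOnCommit {
    private final FileCache cache;
    PendingWrites(FileCache cache) { this.cache = cache; }

    // invoked by FileCache.commit() before anything reaches disk;
    // throwing StorageException here prevents the commit
    public void prepareToCommit() throws StorageException {
        CachedPage page = cache.getPage(0, 3);  // hypothetical target page
        try {
            cache.setWritable(page);
            page.contents[8] = (byte) 0xFF;     // push buffered state into the page
        } finally {
            cache.unpin(page);
        }
    }
}

// Typical lifecycle (file names hypothetical):
//   FileCache cache = new FileCache(new String[] { "/tmp/db.btd" }, "/tmp/db");
//   cache.addNotifier(new PendingWrites(cache));
//   ... pin / setWritable / modify / unpin as work arrives ...
//   cache.commit();  // notifiers run first, then the log and pages are flushed
//   cache.close();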