KickJava   Java API By Example, From Geeks To Geeks.

Java > Open Source Codes > org > netbeans > mdr > persistence > btreeimpl > btreestorage > BtreeDataFile


1 /*
2  * The contents of this file are subject to the terms of the Common Development
3  * and Distribution License (the License). You may not use this file except in
4  * compliance with the License.
5  *
6  * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
7  * or http://www.netbeans.org/cddl.txt.
8  *
9  * When distributing Covered Code, include this CDDL Header Notice in each file
10  * and include the License file at http://www.netbeans.org/cddl.txt.
11  * If applicable, add the following below the CDDL Header, with the fields
12  * enclosed by brackets [] replaced by your own identifying information:
13  * "Portions Copyrighted [year] [name of copyright owner]"
14  *
15  * The Original Software is NetBeans. The Initial Developer of the Original
16  * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun
17  * Microsystems, Inc. All Rights Reserved.
18  */

19 package org.netbeans.mdr.persistence.btreeimpl.btreestorage;
20
21 import java.io.*;
22 import java.text.*;
23 import java.util.*;
24
25 import org.netbeans.mdr.persistence.*;
26 import org.netbeans.mdr.persistence.btreeimpl.btreeindex.*;
27
28 /**
29 * This class represents the Btree data file. This file is divided into chunks
30 * of BTREE_CHUNK_SIZE bytes each. It begins with a header of size
31 * BTREE_HEADER_SIZE bytes (an even number of chunks) followed by a set of
32 * variable-sized extents (each of which is also an even number of chunks).
33 * A record, which contains a key plus data, consists of one or more extents.
34 * If there is more than one, each other than the last points to its successor.
35 * There are also extents representing records which have been deleted. The
36 * data file header points to chains of deleted extents organized by size,
37 * allowing their efficient reuse. No attempt is made to coalesce deleted
38 * extents.
39 * <p>
40 * An extent is created slightly bigger than necessary to contain its record,
41 * to allow the record to grow subsequently without requiring a new extent
42 * to be allocated for it.
43 *
44 *
45 * <ol>
46 * Data file header disk format:
47 * <li>
48 * bytes 0-63 FileHeader
49 * <li>
50 * bytes 64-67 MAGIC
51 * <li>
52 * bytes 68-71 eof chunk number
53 * <li>
54 * bytes 72-75 version
55 * <li>
56 * bytes 76-113 Repository UUID string
57 * <li>
58 * bytes 114-117 first deleted extent header of size 1 chunk
59 * <li>
60 * bytes 118-121 first deleted extent header of size 2 chunks
61 * <li>
62 * ...
63 * <li>
64 * bytes 1134-1137 first deleted extent header of size 256 chunks
65 * <li>
66 * bytes 1138-1141 Number of objects currently in repository
67 * <li>
68 * bytes 1142-1149 Persistent counter
69 * </ol>
70 * Note that no extents larger than 256 (MAX_CHUNKS_IN_EXTENT) chunks are
71 * created.
72 * <p>
73 * @see BtreeExtent
74 */

75 class BtreeDataFile implements FileCache.NotifyOnCommit, MofidGenerator {
76
77     /** The btree data file is divided into chunks of this size. A
78     file cache page must be an even number of chunks. Value is 32 */

79     static final int BTREE_CHUNK_SIZE = 32;
80
81     /** the current version of btree. Inital value was 100 */
82     static final int BTREE_VERSION = 101;
83
84     /** the size of the data file header in bytes. This must be an
85     even number of chunks. Value is 2048 */

86     static final int BTREE_HEADER_SIZE = 2048;
87
88     /** Maximum number of chunks in a single extent. Value is 256 */
89     static final int MAX_CHUNKS_IN_EXTENT = 256;
90
91     /** Maximum bytes in a single extent */
92     static final int MAX_BYTES_IN_EXTENT =
93                     MAX_CHUNKS_IN_EXTENT * BTREE_CHUNK_SIZE;
94
95     /* magic number identifying btree data file header */
96     private static final int MAGIC = 0xFEEDBEEF;
97
98     /* the file cache from which we read pages */
99     private FileCache cache;
100
101     /* The index of the data file within the cache */
102     private int fileIndex;
103
104     /* The cache page size */
105     private int pageSize;
106
107     /* The number of chunks in a page */
108     private int chunksPerPage;
109
110     /* header magic number */
111     private int magic;
112
113     /* version of Btree in file */
114     private int version;
115
116     /* UUID of data file */
117     private String JavaDoc UUID;
118
119     /* persistent counter */
120     private long counter;
121
122     /* File header (only used when creating file) */
123     private FileHeader fileHeader;
124
125     /* Number of chunks in file */
126     private int eof;
127
128     /* Array of chains of deleted extents */
129     private int deletedExtents[];
130
131     /* true when header is different in memory than on disk */
132     private boolean dirty = false;
133
134     /* number of objects currently in repository */
135     private int numberOfObjects;
136
137     /* The number of changes made to the data file snce it was last opened */
138     int modificationLevel;
139
140     /* buffer for MOFID conversion. Any method which uses this *must*
141        be synchronized! */

142     private byte mofid[] = new byte[MOFID.LENGTH];
143     
144     /** BtreStorage*/
145     private BtreeStorage storage;
146
    /** Create a BtreeDataFile by reading a data file from a File Cache
    * @param storage the BtreeStorage that owns this data file
    * @param theCache the file cache
    * @param index index of the data file in the cache
    * @exception StorageException if an IO error occurs or btree detects an inconsistency
    */
    BtreeDataFile(BtreeStorage storage, FileCache theCache, int index) throws StorageException {
        this.storage = storage;
        cache = theCache;
        cache.addNotifier(this);
        fileIndex = index;
        pageSize = cache.getPageSize();
        checkFit(pageSize);
        chunksPerPage = pageSize / BTREE_CHUNK_SIZE;
        fileHeader = null;
        deletedExtents = new int[MAX_CHUNKS_IN_EXTENT];

        // Read the btree header from chunk 0, skipping the FileHeader
        // bytes that precede it on disk.
        IntHolder offset = new IntHolder();
        CachedPage page = getChunk(0, offset);
        try {
            offset.setValue(offset.getValue() + FileHeader.HEADER_SIZE);
            magic = Converter.readInt(page.contents, offset);
            eof = Converter.readInt(page.contents, offset);
            version = Converter.readInt(page.contents, offset);
            UUID = Converter.readString(page.contents, offset);
            if (magic != MAGIC) {
                throw new StoragePersistentDataException(
                                "Bad magic number in data file");
            }
            if (version != BTREE_VERSION) {
                throw new StoragePersistentDataException(
                                "Bad version number in data file");
            }
            // One deleted-chain head per extent size, 1..MAX_CHUNKS_IN_EXTENT.
            for (int i = 0; i < MAX_CHUNKS_IN_EXTENT; i++) {
                deletedExtents[i] = Converter.readInt(page.contents, offset);
            }
            numberOfObjects = Converter.readInt(page.contents, offset);
            counter = Converter.readLong(page.contents, offset);
        }
        finally {
            // Always unpin the header page, even if validation failed.
            page.unpin();
        }
        dirty = false;
    }
190
    /** Create a BtreeDataFile in memory. This is only used when creating a
    * data file on disk.
    * @param storage the BtreeStorage that owns this data file
    * @param hdr FileHeader to write to disk
    * @param vers version of Btree data file to create
    */
    private BtreeDataFile(BtreeStorage storage, FileHeader hdr, int vers) {
        this.storage = storage;
        magic = MAGIC;
        version = vers;
        // The new file initially contains nothing but the header.
        eof = BTREE_HEADER_SIZE / BTREE_CHUNK_SIZE;
        // Reuse the storage's UUID when one was supplied; otherwise mint one.
        if (storage.storageUUID != null)
            UUID = storage.storageUUID;
        else
            UUID = new UUID().toString();
        counter = initialMofIdConter();
        fileHeader = hdr;
        deletedExtents = new int[MAX_CHUNKS_IN_EXTENT];

        // No cache attached: this instance exists only to format a header.
        cache = null;
        fileIndex = -1;
        pageSize = 0;
        chunksPerPage = 0;
    }
215
216     /** Create a btree data file
217     * @param fileName name of file to create
218     * @param hdr FileHeader to write to disk
219     * @param replace if true, overwrite an existing file
220     * @param pgSize the page size with which the file will be accessed
221     * @exception StorageException if the file exists and replace is false
222     * or if an I/O error occurs
223     */

224     static void create(BtreeStorage storage, String JavaDoc fileName, FileHeader hdr, int pgSize, boolean replace)
225                     throws StorageException {
226
227         checkFit(pgSize);
228
229         File old = new File(fileName);
230         if (old.exists()) {
231             if (replace) {
232                 old.delete();
233             }
234             else {
235                 throw new StorageBadRequestException(
236                     MessageFormat.format(
237                         "File {0} exists",
238                         new Object JavaDoc[] {fileName}));
239             }
240         }
241
242         BtreeDataFile dummy = new BtreeDataFile(storage, hdr, BTREE_VERSION);
243         byte buffer[] = new byte[pgSize];
244         dummy.writeHeader(buffer, true);
245
246         try {
247             RandomAccessFile file = new RandomAccessFile(fileName, "rw");
248             file.write(buffer);
249             file.close();
250         }
251         catch (IOException ex) {
252             throw new StorageIOException(ex);
253         }
254     }
255
256     /** Read a record from the data file
257     * @param chunkNum the number of the chunk where the record starts
258     * @param key the record's key
259     * @return a stream from which the record can be read. The stream must
260     * be closed when it is no longer needed to allow the pages to be
261     * release from the cache.
262     * @exception StorageException if btree detects an inconsistency
263     * or if an I/O error occurs
264     */

265     synchronized CachedPageInputStream get(int chunkNum, long key)
266                                 throws StorageException {
267         Converter.writeLong(mofid, 0, key);
268     return get(chunkNum, mofid);
269     }
270
271     /** Read a record from the data file
272     * @param chunkNum the number of the chunk where the record starts
273     * @param key the record's key
274     * @return a stream from which the record can be read. The stream must
275     * be closed when it is no longer needed to allow the pages to be
276     * release from the cache.
277     * @exception StorageException if btree detects an inconsistency
278     * or if an I/O error occurs
279     */

280     synchronized CachedPageInputStream get(int chunkNum, byte[] key)
281                                 throws StorageException {
282         return getStream(checkRecord(chunkNum, key));
283     }
284
285     /* create a stream from a normal extent and all of its continuations */
286     private CachedPageInputStream getStream(NormalBtreeExtent ext)
287             throws StorageException {
288
289         CachedPageInputStream strm = new CachedPageInputStream();
290         ext.addToStream(strm);
291         int next = ext.getNext();
292         while (next != 0) {
293             ContinuationBtreeExtent cext =
294                         (ContinuationBtreeExtent)getExtent(next);
295             cext.addToStream(strm);
296             next = cext.getNext();
297         }
298         return strm;
299     }
300
301     /** Write a new record to the file. Duplicate keys are not checked.
302     * If you wish to replace an existing record which has the same key,
303     * call replace instead.
304     * @param key the record's key
305     * @param record the bytes which make up the record. There is effectively
306     * no limit to the record length.
307     * return the chunk number of the first extent created
308     * @exception StorageException if btree detects an inconsistency
309     * or if an I/O error occurs
310     */

311     synchronized int put(long key, byte record[])
312                             throws StorageException {
313     Converter.writeLong(mofid, 0, key);
314     return put(mofid, record);
315     }
316
317     /** Write a new record to the file. Duplicate keys are not checked.
318     * If you wish to replace an existing record which has the same key,
319     * call replace instead.
320     * @param key the record's key
321     * @param record the bytes which make up the record. There is effectively
322     * no limit to the record length.
323     * return the chunk number of the first extent created
324     * @exception StorageException if btree detects an inconsistency
325     * or if an I/O error occurs
326     */

327     synchronized int put(byte[] key, byte record[])
328                             throws StorageException {
329
330         modificationLevel++;
331         NormalBtreeExtent first =
332             getNormalExtent(record.length, key, null);
333         ActiveBtreeExtent previous = first;
334         first.writeData(record, 0);
335
336         int offset = first.getMyDataLength();
337         int toAllocate = record.length - offset;
338         if (toAllocate > 0) {
339
340             while (toAllocate > 0) {
341                 ContinuationBtreeExtent cext =
342                     getContinuationExtent(toAllocate, null);
343                 cext.writeData(record, offset);
344                 previous.setNext(cext);
345                 previous.writeHeader();
346                 previous = cext;
347
348                 int written = cext.getMyDataLength();
349                 offset += written;
350                 toAllocate -= written;
351             }
352         }
353
354         previous.setNext(0);
355         previous.writeHeader();
356         numberOfObjects++;
357
358         return first.getOffset();
359     }
360
361     /** Replace an existing record to the file. This replaces only the bytes
362     * in the record: the key does not change. The size of the record may
363     * change. if possible, the existing extent(s) will be reused.
364     * @param key the existing record's key
365     * @param record the bytes which will make up the new version of the
366     * record. There is effectively no limit to the record length.
367     * return the new chunk number of the first extent
368     * @exception StorageException if btree detects an inconsistency
369     * or if an I/O error occurs
370     */

371     synchronized int replace(int chunkNum, long key, byte record[])
372         throws StorageException {
373
374     Converter.writeLong(mofid, 0, key);
375     return replace(chunkNum, mofid, record);
376     }
377
    /** Replace an existing record in the file. This replaces only the bytes
    * in the record: the key does not change. The size of the record may
    * change. If possible, the existing extent(s) will be reused.
    * @param chunkNum the chunk number of the record's current first extent
    * @param key the existing record's key
    * @param record the bytes which will make up the new version of the
    * record. There is effectively no limit to the record length.
    * @return the new chunk number of the first extent
    * @exception StorageException if btree detects an inconsistency
    * or if an I/O error occurs
    */
    synchronized int replace(int chunkNum, byte key[], byte record[])
        throws StorageException {
        modificationLevel++;
        // Validate the record and collect its existing continuation
        // extents so they can be offered for reuse below.
        NormalBtreeExtent exist = checkRecord(chunkNum, key);
        ArrayList conts = null;
        int next = exist.getNext();
        if (next > 0) {
            conts = new ArrayList();
            while (next > 0) {
                ContinuationBtreeExtent ext =
                    (ContinuationBtreeExtent)getExtent(next);
                conts.add(ext);
                next = ext.getNext();
            }
        }

        // Obtain a head extent big enough for the new contents; this may
        // be the existing head (reused) or a freshly allocated one.
        NormalBtreeExtent first = getNormalExtent(record.length, key, exist);
        ActiveBtreeExtent previous = first;
        first.writeData(record, 0);

        // If a new head extent was allocated, the old head is now dead.
        if (exist != first) {
            DeletedBtreeExtent dext = new DeletedBtreeExtent(exist);
            dext.writeHeader();
        }

        // Write the remainder into continuation extents, preferring to
        // reuse the old chain's extents in order (contIndex tracks reuse).
        int offset = first.getMyDataLength();
        int toAllocate = record.length - offset;
        int contIndex = 0;
        if (toAllocate > 0) {

            while (toAllocate > 0) {
                ContinuationBtreeExtent candidate = null;
                if (conts != null && contIndex < conts.size()) {
                    candidate =
                        (ContinuationBtreeExtent)conts.get(contIndex);
                }
                ContinuationBtreeExtent cext =
                    getContinuationExtent(toAllocate, candidate);
                if (cext == candidate) {
                    contIndex++;
                }
                cext.writeData(record, offset);
                previous.setNext(cext);
                previous.writeHeader();
                previous = cext;

                int written = cext.getMyDataLength();
                offset += written;
                toAllocate -= written;
            }
        }

        // Old continuation extents that were not reused are now dead.
        if (conts != null) {
            for (int i = contIndex; i < conts.size(); i++) {
                DeletedBtreeExtent dext =
                    new DeletedBtreeExtent(
                            (ContinuationBtreeExtent)conts.get(i));
                dext.writeHeader();
            }
        }
        previous.setNext(0);
        previous.writeHeader();

        return first.getOffset();
    }
453
454     /** Remove a record
455     * @param chunkNum the chunk number for the record's first extent
456     * @param key the record's key
457     * @exception StorageException if btree detects an inconsistency
458     * or if an I/O error occurs
459     */

460     synchronized void remove(int chunkNum, long id) throws StorageException {
461     Converter.writeLong(mofid, 0, id);
462         remove(chunkNum, mofid);
463     }
464
465     /** Remove a record
466     * @param chunkNum the chunk number for the record's first extent
467     * @param key the record's key
468     * @exception StorageException if btree detects an inconsistency
469     * or if an I/O error occurs
470     */

471     synchronized void remove(int chunkNum, byte key[]) throws StorageException {
472         modificationLevel++;
473         NormalBtreeExtent ext = checkRecord(chunkNum, key);
474         DeletedBtreeExtent dext = new DeletedBtreeExtent(ext);
475         dext.writeHeader();
476
477         int next = ext.getNext();
478         while (next != 0) {
479             ContinuationBtreeExtent cex =
480                         (ContinuationBtreeExtent)getExtent(next);
481             DeletedBtreeExtent dex = new DeletedBtreeExtent(cex);
482             dex.writeHeader();
483             next = cex.getNext();
484         }
485         numberOfObjects--;
486     }
487
488     /** get the page size used by the file cache
489     * @return page size
490     */

491     int getPageSize() {
492         return pageSize;
493     }
494
495     /** get number of objects currently conatined in the repository
496     * @return number of objects.
497     */

498     int size() {
499         return numberOfObjects;
500     }
501
502     /** Add a newly deleted extent to the chains of deleted extents
503     * @param extent the newly deleted extent
504     */

505     void deleteExtent(DeletedBtreeExtent extent) {
506         int size = extent.getSize();
507         extent.setNext(deletedExtents[size-1]);
508         deletedExtents[size-1] = extent.getOffset();
509         dirty = true;
510     }
511
512     /* add in a fudge factor for record size, so a slightly bigger
513     * record still fits */

514     private int computeMaxExtentSize(int numChunks) {
515         // if there is none of the correct size available, use a slightly
516
// bigger one
517
int extra = Math.min((int)(numChunks * .2), 3);
518         return Math.min(numChunks + extra, MAX_CHUNKS_IN_EXTENT);
519     }
520         
521     /* Get a deleted record of the same size, or slightly bigger */
522     private DeletedBtreeExtent getDeletedExtent(int numChunks)
523         throws StorageException {
524
525         for (int i = numChunks; i <= computeMaxExtentSize(numChunks); i++) {
526             if (deletedExtents[i-1] > 0) {
527                 DeletedBtreeExtent ext =
528                     (DeletedBtreeExtent)getExtent(deletedExtents[i-1]);
529                 deletedExtents[i-1] = ext.getNext();
530                 dirty = true;
531                 return ext;
532             }
533         }
534
535         return null;
536     }
537
538     /** get a page containing the desired chunk.
539     * Note that the returned page is pinned.
540     * @param chunkNum the number of the chunk to get
541     * @param offset used to return the offset within the page whee the
542     * chunk begins
543     * @return the page containing the chunk
544     * @exception StorageException if an I/O error occurs
545     */

546     CachedPage getChunk(int chunkNum, IntHolder offset)
547                                         throws StorageException{
548         int pageNum = chunkNum / chunksPerPage;
549         CachedPage page = cache.getPage(fileIndex, pageNum);
550         offset.setValue((chunkNum % chunksPerPage) * BTREE_CHUNK_SIZE);
551
552         return page;
553     }
554
555     /** get an array of page containing the desired chunks.
556     * Note that the returned pages are pinned. Note also that the last page
557     * returned may contain chunks following the ones requested.
558     * @param chunkNum the number of the first chunk to get
559     * @param numChunks how many consecutive chunks to read
560     * @param offset used to return the offset within the page where the
561     * chunk begins
562     * @return the pages containing the chunks
563     * @exception StorageException if an I/O error occurs
564     */

565     CachedPage[] getChunks(int chunkNum, int numChunks, IntHolder offset)
566                                                 throws StorageException {
567         int startPageNum = chunkNum / chunksPerPage;
568         int endPageNum = (chunkNum + numChunks - 1) / chunksPerPage;
569         CachedPage pages[] = cache.getPages(
570                     fileIndex, startPageNum, endPageNum - startPageNum + 1);
571         offset.setValue((chunkNum % chunksPerPage) * BTREE_CHUNK_SIZE);
572
573         return pages;
574     }
575
576     /* Get the extent which begins at the supplied chunk */
577     private BtreeExtent getExtent(int chunkNum)
578                                 throws StorageException {
579         return BtreeExtent.readExtent(this, chunkNum);
580     }
581
582     /**
583     * write the header to disk if it is not up-to-date there. This
584     * is part of the FileCache.NotifyOnCommit interface.
585     * @exception StorageException if btree detects an inconsistency
586     * or if an I/O error occurs
587     */

588     public void prepareToCommit() throws StorageException {
589         if (!dirty)
590             return;
591         CachedPage page = cache.getPage(fileIndex, 0);
592         try {
593             page.setWritable();
594             writeHeader(page.contents, false);
595         }
596         finally {
597             page.unpin();
598         }
599     }
600
601     /** Copy to another file. Since deleted extents are not copied, the
602     * copied file is a compressed version of the original. It is assumed
603     * that all changes to this file have been comitted, since we iterate
604     * through the file to find records to copy.
605     * @param target file to copy to. This should be empty.
606     */

607     public void copy(BtreeDataFile target) throws StorageException {
608         Iterator iter = iterator(ITERATE_NORMAL_EXTENTS);
609     while (iter.hasNext()) {
610         NormalBtreeExtent ext = (NormalBtreeExtent)iter.next();
611         CachedPageInputStream strm = get(ext.myChunkNum, ext.key);
612         int offset = 0;
613         int len = strm.available();
614         byte data[] = new byte[len];
615         do {
616             int read = strm.read(data, offset, len);
617         offset += read;
618         len -= read;
619         } while (len > 0);
620         target.put(ext.key, data);
621     }
622     }
623
624     /** allocate some number of objects using the persistent counter */
625     public synchronized long allocateObjects(int number) {
626         //if (number < 0) Thread.dumpStack();
627
counter += number;
628         dirty = true;
629     return counter;
630     }
631
632     /** Get data file UUID, which will be the prefix of all MOFIDs
633     */

634     public String JavaDoc getMofidPrefix() {
635         return UUID;
636     }
637
638     /** Get new MOFID
639     */

640     public long getNextMofid() {
641         return allocateObjects(1);
642     }
643
644     /** Get value of counter. Used in transaction cache
645      * to store state of mof id generator (after commit). */

646     long getMofIdCounter () {
647         return counter;
648     }
649     
650     /** Set value of counter. Used in transaction cache to perform rollback on mof id generator. */
651     void setMofIdCounter (long counter) {
652         //Thread.dumpStack();
653
this.counter = counter;
654         dirty = true;
655     }
656     
    /** Computes the first serial number of an external MOFID.
     * NOTE(review): String.hashCode() may be negative, so "% 2048" ranges
     * over -2047..2047 and the result lies in
     * FIRST_EXTERNAL_ID + 1 .. FIRST_EXTERNAL_ID + 4095 — presumably
     * intentional for #46323; confirm before changing. */
    long initialMofIdConter() {
        return BtreeFactory.FIRST_EXTERNAL_ID + (UUID.hashCode() % 2048) + 2048; // adding a random increment to fix #46323
    }
661     
    /** dump basic header info */
    public static final int DUMP_HEADER = 1;

    /** dump deleted extent chains */
    public static final int DUMP_DELETED_CHAINS = 2;

    /** dump active extent chains */
    public static final int DUMP_ACTIVE_CHAINS = 4;

    /** dump extent headers */
    public static final int DUMP_EXTENT_HEADERS = 8;

    /** do full consistency check (identifier spelling is historical) */
    public static final int DUMP_CONSISTENTCY_INFO = 16;
676
    /** dump file as text (for debugging)
    * "level" is a bitmask.
    * <ol>
    * <li>
    * DUMP_HEADER -- basic header info
    * <li>
    * DUMP_DELETED_CHAINS -- deleted extent chains
    * <li>
    * DUMP_ACTIVE_CHAINS -- active extent chains
    * <li>
    * DUMP_EXTENT_HEADERS -- extent headers
    * <li>
    * DUMP_CONSISTENTCY_INFO -- full consistency check
    * </ol>
    * hdrLevel determines what to dump for each extent header.
    * @see BtreeExtent#dump
    * @param level bitmask of what to dump.
    * @param hdrLevel bitmask of what to dump for each extent header.
    * @param strm where to dump it to
    * @return number of errors encountered
    */
    public int dump(int level, int hdrLevel, PrintWriter strm)
                    throws StorageException{
        // Delegate to the internal variant, printing header and trailer.
        return dump(level, hdrLevel, true, strm);
    }
702
703     int dump(int level, int hdrLevel, boolean headAndTrail, PrintWriter strm)
704                     throws StorageException{
705         boolean doHeader = (level & DUMP_HEADER) != 0;
706         boolean doDelChains = (level & DUMP_DELETED_CHAINS) != 0;
707         boolean doActChains = (level & DUMP_ACTIVE_CHAINS) != 0;
708         boolean doExtHeaders = (level & DUMP_EXTENT_HEADERS) != 0;
709         boolean doChecks = (level & DUMP_CONSISTENTCY_INFO) != 0;
710
711         if (strm == null) {
712             strm = new PrintWriter(System.out);
713         }
714
715     if (headAndTrail) {
716         strm.println("Btree data file dump:\n");
717     }
718         if (doHeader) {
719             strm.println("Magic number: " + magic);
720             strm.println("Btree version: " + version);
721             strm.println("EOF chunk: " + eof);
722             strm.println("" + numberOfObjects + " objects");
723             strm.println();
724         }
725
726         if (doDelChains) {
727             boolean sawDeleted = false;
728             for (int i = 0; i < MAX_CHUNKS_IN_EXTENT; i++) {
729                 if (deletedExtents[i] > 0) {
730                     if (!sawDeleted) {
731                         strm.println("Deleted extents:");
732                         sawDeleted = true;
733                     }
734
735                     strm.println("\t" + (i+1) + " chunks:");
736                     int next = deletedExtents[i];
737                     while (next > 0) {
738                         strm.println("\t\t" + next);
739                         BtreeExtent del = getExtent(next);
740                         next = del.getNext();
741                     }
742                 }
743             }
744             if (!sawDeleted) {
745                 strm.println("No deleted extents");
746             }
747
748             strm.println();
749         }
750
751         if (doActChains) {
752             strm.println("Active chains:");
753             Iterator iter = iterator(ITERATE_NORMAL_EXTENTS);
754             while (iter.hasNext()) {
755                 BtreeExtent ext = (NormalBtreeExtent)iter.next();
756                 strm.println("\t" + ext.getOffset());
757                 int next = ext.getNext();
758                 while (next > 0) {
759                     strm.println("\t\t" + next);
760                     next = getExtent(next).getNext();
761                 }
762             }
763             strm.println();
764         }
765
766         if (doExtHeaders) {
767             strm.println("All extent headers:");
768             Iterator iter = iterator(ITERATE_ALL_EXTENTS );
769             while (iter.hasNext()) {
770                 BtreeExtent ext = (BtreeExtent)iter.next();
771                 ext.dump(hdrLevel, strm);
772                 strm.println();
773             }
774             strm.println();
775         }
776
777     int numErrs = 0;
778         if (doChecks) {
779             // Ensure all extents are on exactly one chain
780
// Ensure all records on deleted chains are deleted and
781
// of the correct size
782
// Ensure records on active chains have the corrct type
783
// Ensure only maximum-sized active extents are in chains
784
// Ensure the repository knows how many objects it has
785
final byte WAS_SEEN = 4;
786
787
788             strm.println("Consistentcy check:");
789
790             byte extents[] = new byte[eof];
791
792             Iterator iter = iterator(ITERATE_ALL_EXTENTS );
793             while (iter.hasNext()) {
794                 BtreeExtent ext = (BtreeExtent)iter.next();
795                 extents[ext.getOffset()] = ext.getType();
796
797                 if (ext instanceof ActiveBtreeExtent) {
798                     ActiveBtreeExtent aext = (ActiveBtreeExtent)ext;
799                     if (aext.getNext() > 0 &&
800                         aext.getMyDataLength() <
801                                 aext.getAvailableDataLength()) {
802                         strm.println(
803                             aext.getTypeName() + " extent " + ext.getOffset() +
804                             " of size " + aext.getMyDataLength() +
805                             "has a successor extent");
806                     }
807                 }
808             }
809
810             int totalNormal = 0;
811             for (int i = 0; i < eof; i++) {
812                 if ((extents[i] & 0x3) == BtreeExtent.IS_NORMAL) {
813                     totalNormal++;
814                     extents[i] |= WAS_SEEN;
815                     BtreeExtent ext = getExtent(i);
816                     int next = ext.getNext();
817                     while (next != 0) {
818                         BtreeExtent cext = getExtent(next);
819                         int type = cext.getType();
820                         if (type != BtreeExtent.IS_CONTINUATION) {
821                             strm.println(
822                                 BtreeExtent.getTypeName(type) +
823                                 " extent " + next + " is on active chain " + i);
824                             numErrs++;
825                         }
826                         else if ((extents[next] & WAS_SEEN) != 0) {
827                             strm.println("extent " + next +
828                                 " is on more than one chain");
829                             numErrs++;
830                         }
831                         extents[next] |= WAS_SEEN;
832                         next = cext.getNext();
833                     }
834                 }
835             }
836
837             if (totalNormal != numberOfObjects) {
838                 strm.println("Repository has " + totalNormal +
839                     " normal extents, but thinks it has " + numberOfObjects +
840                     " objects");
841                 numErrs++;
842             }
843
844             for (int i = 0; i < MAX_CHUNKS_IN_EXTENT; i++) {
845                 int next = deletedExtents[i];
846                 while (next != 0) {
847                     BtreeExtent ext = getExtent(next);
848                     int type = ext.getType();
849                     if (type != BtreeExtent.IS_DELETED) {
850                         strm.println(
851                             BtreeExtent.getTypeName(type) +
852                             " extent " + next + " is on deleted chain " +
853                             (i + 1));
854                             numErrs++;
855                     }
856                     else {
857                         extents[next] |= WAS_SEEN;
858                         int size = ext.getSize();
859                         if (size != i + 1) {
860                             strm.println("Deleted extent " + next +
861                                 " of size " + size + " is on chain for size " +
862                                 (i + 1));
863                             numErrs++;
864                         }
865                     }
866                     next = ext.getNext();
867                 }
868             }
869
870             for (int i = 0; i < eof; i++) {
871                 boolean wasSeen = (extents[i] & WAS_SEEN) != 0;
872                 int type = (extents[i] & 0x3);
873                 if (type > 0 && !wasSeen) {
874                     strm.println("Extent " + i + " is not on any chain");
875                     numErrs++;
876                 }
877             }
878                             
879         if (headAndTrail) {
880         strm.println("" + numErrs + " error(s) detected.");
881         }
882             strm.println();
883         }
884
885
886     if (headAndTrail) {
887         strm.println("End of tree data file dump\n");
888     }
889         strm.flush();
890     return numErrs;
891     }
892
893             
894
895     /** Get a normal extent of the desired size, and write the supplied key
896     * to it. There are three ways to get the extent, list in decreasing order
897     * of desirability:
898     * <p>
899     * Use the candidate. This will be done if it is large enough. The
900     * candidate alrady contains the supplied key.
901     * <p>
902     * Use a deleted extent of the desired size. This will be done if the
903     * candidate isn't suitable, but a suitable deleted extent exists.
904     * <p>
905     * add a new extent at the end of the file.
906     */

907     private NormalBtreeExtent getNormalExtent(
908         int length, byte key[], NormalBtreeExtent candidate)
909             throws StorageException {
910
911         if (candidate != null) {
912             if (candidate.isMaximum() ||
913                     candidate.getAvailableDataLength() >= length) {
914                 candidate.setMyDataLength(length);
915                 return candidate;
916             }
917         }
918
919         int numChunks =
920             NormalBtreeExtent.getNumChunks(key.length, length);
921         DeletedBtreeExtent del = getDeletedExtent(numChunks);
922         NormalBtreeExtent first;
923         if (del != null) {
924             first = new NormalBtreeExtent(del, key, length);
925         }
926         else {
927             first = addNormalExtent(length, key);
928         }
929
930         return first;
931     }
932
933     /* The same algorithm is used here as for getNormalExtent */
934     private ContinuationBtreeExtent getContinuationExtent(
935         int length, ContinuationBtreeExtent candidate)
936             throws StorageException {
937
938         if (candidate != null) {
939             if (candidate.isMaximum() ||
940                     candidate.getAvailableDataLength() >= length){
941                 candidate.setMyDataLength(length);
942                 return candidate;
943             }
944         }
945
946         int nChunks = ContinuationBtreeExtent.getNumChunks(length);
947         DeletedBtreeExtent dext = getDeletedExtent(nChunks);
948         ContinuationBtreeExtent cext;
949         if (dext != null) {
950             cext = new ContinuationBtreeExtent(dext, length);
951         }
952         else {
953             cext = addContinuationExtent(length);
954         }
955
956         return cext;
957     }
958
959     /* Add a normal extent at the end of the file */
960     private NormalBtreeExtent addNormalExtent(int length, byte key[] )
961                         throws StorageException {
962         int numChunks = NormalBtreeExtent.getNumChunks(length, key.length);
963         NormalBtreeExtent ext =
964             new NormalBtreeExtent(
965                 this, eof, (short)computeMaxExtentSize(numChunks), length, key);
966         eof += ext.getSize();;
967         dirty = true;
968         return ext;
969     }
970
971     /* Add a continuation extent at the end of the file */
972     private ContinuationBtreeExtent addContinuationExtent(int length) {
973         int numChunks =
974             ContinuationBtreeExtent.getNumChunks(length);
975         ContinuationBtreeExtent ext =
976             new ContinuationBtreeExtent(
977                 this, eof, (short)computeMaxExtentSize(numChunks), length);
978         eof += ext.getSize();
979         dirty = true;
980         return ext;
981     }
982
983         
984     /* write the data file header to disk. The FileHeader is written
985     * only when creating the data file.
986     */

987     private void writeHeader(byte buffer[], boolean writeFileHeader) {
988         if (writeFileHeader) {
989             fileHeader.write(buffer);
990         }
991         IntHolder offset = new IntHolder(FileHeader.HEADER_SIZE);
992         Converter.writeInt(buffer, offset, magic);
993         Converter.writeInt(buffer, offset, eof);
994         Converter.writeInt(buffer, offset, version);
995         Converter.writeString(buffer, offset, UUID);
996         for (int i = 0; i < MAX_CHUNKS_IN_EXTENT; i++) {
997             Converter.writeInt(buffer, offset, deletedExtents[i]);
998         }
999         Converter.writeInt(buffer, offset, numberOfObjects);
1000        Converter.writeLong(buffer, offset, counter);
1001    }
1002
1003    /* check that a normal extent with the supplied key begins at the
1004    * given chunk number. if it does, return it; if not, throw
1005    * an exception
1006    */

1007    private NormalBtreeExtent checkRecord(int chunkNum, byte key[])
1008                                    throws StorageException {
1009        BtreeExtent ext = getExtent(chunkNum);
1010        if (!(ext instanceof NormalBtreeExtent)) {
1011            throw new
1012                StoragePersistentDataException(
1013                    MessageFormat.format(
1014                        "Not a normal extent at offset {0}",
1015                        new Object JavaDoc[] { new Integer JavaDoc(chunkNum) } ));
1016        }
1017
1018        NormalBtreeExtent norm = (NormalBtreeExtent)ext;
1019        if (!Arrays.equals(key, norm.key)) {
1020            throw new StorageBadRequestException(
1021                MessageFormat.format(
1022                    "Incorrect key at offset {0}",
1023                    new Object JavaDoc[] { new Integer JavaDoc(chunkNum) } ));
1024        }
1025
1026        return norm;
1027    }
1028
1029    /** ensure everything that needs to fit evenly does
1030    * @param pageSz the page size with which this file will be accessed
1031    * @exception BadParameterException if any constrainst are violated
1032    */

1033    private static void checkFit(int pageSz) throws StorageException {
1034
1035        String JavaDoc msg = null;
1036        if (BTREE_HEADER_SIZE % BTREE_CHUNK_SIZE != 0)
1037            msg = "Btree header size is not an even number of chunks";
1038        else if (pageSz % BTREE_CHUNK_SIZE != 0)
1039            msg = "File cache page size is not an even number of chunks";
1040
1041        if (msg != null)
1042            throw new StorageBadRequestException(msg);
1043    }
1044
    /** Iteration mode: visit every extent in the file. */
    public static final int ITERATE_ALL_EXTENTS = 0;

    /** Iteration mode: visit normal extents only. */
    public static final int ITERATE_NORMAL_EXTENTS = 1;

    /** Iteration mode: visit normal extents, returning each record's key
    * (decoded via the storage's readMOFIDData) rather than the extent. */
    public static final int ITERATE_KEYS = 2;
1053
1054    /** Create an iterator which iterates over the records in this file
1055    * @param iterType. What to iterate over. See the constants in this
1056    * class.
1057    */

1058    Iterator iterator(int type) {
1059        return this.new BtreeDataFileIterator(type);
1060    }
1061
1062    /**
1063     * Iterate over the extents in the data file. Optionally,
1064     * either all extents or the normal extents only. This is normally
1065     * created with the BtreeDataFile.iterator method.
1066     */

1067    class BtreeDataFileIterator implements Iterator {
1068
1069        /* The level of the data file when we were created */
1070        private int iterModLevel;
1071
1072        /* current extent */
1073        private BtreeExtent iterCurExtent;
1074
1075        /* what we iterate over */
1076        private int iterType;
1077
1078        /** Create the iterator
1079        * @param iterType. What to iterate over. See the constants in the
1080        * enclosing class.
1081        */

1082        BtreeDataFileIterator(int type) {
1083            iterType = type;
1084            iterCurExtent = null;
1085            findNextExtent(true);
1086        }
1087
1088        /** Remove is not suppported
1089        * @exception UnsupportedOperationException always thrown
1090        */

1091        public void remove() {
1092            /* an optional operation which we do not support */
1093            throw new UnsupportedOperationException JavaDoc(
1094                "Remove is not supported");
1095        }
1096
1097        /** Is there another extent to return
1098        * @return true if there is another extent
1099        * @exception ConcurrentModificationException if the data file has
1100        * been modified since the iterator was created
1101        */

1102        public boolean hasNext() {
1103            return iterCurExtent != null;
1104        }
1105
1106        /** return the next extent, if there is one
1107        * @return the next extent
1108        * @exception ConcurrentModificationException if the data file has
1109        * been modified since the iterator was created
1110        * @exception NoSuchElementException if no extents remain
1111        */

1112        public Object JavaDoc next() {
1113            synchronized(BtreeDataFile.this) {
1114                checkModLevel();
1115                BtreeExtent ext = iterCurExtent;
1116                if (ext == null) {
1117                    throw new NoSuchElementException("At EOF");
1118                }
1119                findNextExtent(false);
1120
1121                Object JavaDoc retval;
1122
1123                switch (iterType)
1124                {
1125                    case ITERATE_KEYS:
1126                        try {
1127                            retval = BtreeDataFile.this.storage.readMOFIDData (new ByteArrayInputStream (((NormalBtreeExtent)ext).key));
1128                        } catch (StorageException ex) {
1129                            throw new RuntimeStorageException(ex);
1130                        }
1131                        break;
1132
1133                    default:
1134                        retval = ext;
1135                }
1136                return retval;
1137            }
1138        }
1139
1140        /* find and read next extent */
1141        private void findNextExtent(boolean atStart) {
1142            synchronized(BtreeDataFile.this) {
1143                int offset;
1144
1145                if (atStart) {
1146                    iterModLevel = modificationLevel;
1147                    offset = BTREE_HEADER_SIZE/BTREE_CHUNK_SIZE;
1148                }
1149                else if (iterCurExtent != null) {
1150                    offset =
1151                        iterCurExtent.getOffset() + iterCurExtent.getSize();
1152                }
1153                else {
1154                    return;
1155                }
1156
1157                checkModLevel();
1158
1159                boolean allExtents = (iterType == ITERATE_ALL_EXTENTS);
1160                while (offset < eof) {
1161                    BtreeExtent ext;
1162                    try {
1163                        ext = getExtent(offset);
1164                    }
1165                    catch (StorageException ex) {
1166                        throw new RuntimeStorageException(ex);
1167                    }
1168                    if (allExtents || (ext instanceof NormalBtreeExtent)){
1169
1170                        iterCurExtent = ext;
1171                        return;
1172                    }
1173                    offset += ext.getSize();
1174                }
1175                iterCurExtent = null;
1176            }
1177        }
1178
1179        /* check to see if the data file has been changed out from under us */
1180        private void checkModLevel() {
1181            if (iterModLevel != modificationLevel) {
1182                throw new ConcurrentModificationException(
1183                    "Data file had been modified");
1184            }
1185        }
1186    }
1187}
1188
Popular Tags