/*
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License (the License). You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
 * or http://www.netbeans.org/cddl.txt.
 *
 * When distributing Covered Code, include this CDDL Header Notice in each file
 * and include the License file at http://www.netbeans.org/cddl.txt.
 * If applicable, add the following below the CDDL Header, with the fields
 * enclosed by brackets [] replaced by your own identifying information:
 * "Portions Copyrighted [year] [name of copyright owner]"
 *
 * The Original Software is NetBeans. The Initial Developer of the Original
 * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun
 * Microsystems, Inc. All Rights Reserved.
 */

package org.netbeans.mdr.persistence.btreeimpl.btreestorage;

import java.io.*;
import java.util.*;
import java.util.zip.*;

import org.netbeans.mdr.persistence.*;
import org.netbeans.mdr.util.Logger;

/**
 * This is the superclass for extents which are parts of records.
 */
abstract class ActiveBtreeExtent extends BtreeExtent {

    /** How much data this extent contains */
    int dataLength;

    /** The offset at which the data starts */
    int dataStart;

    /** Convert a deleted extent to an active one. The deleted extent has
    * already been removed from its chain.
    * @param del the extent to convert
    */
    ActiveBtreeExtent(DeletedBtreeExtent del) {
        super(del);
        headerIsDirty = true;
    }

    /** Called by subclasses to initialize an ActiveBtreeExtent.
    * @param file the BtreeDataFile this extent will belong to
    * @param chunkNum where this extent begins
    * @param numChunks the size of the extent
    */
    ActiveBtreeExtent(
        BtreeDataFile file, int chunkNum, short numChunks) {

        super(file, chunkNum, numChunks);
    }

    /** Get the amount of data contained in this extent.
    * @return amount of data
    */
    abstract int getMyDataLength();

    /** Set the amount of data contained in this extent.
    * @param length amount of data
    */
    abstract int setMyDataLength(int length);

    /** Get how much data this extent could contain.
    * @return maximum amount of data which would fit
    */
    abstract int getAvailableDataLength();

    /** Write this extent's data to the cache. The data is supplied in
    * a separate buffer; the extent describes where in the cache to write it to.
    * @param dataBuffer data to write
    * @param dataOffset where in the buffer to begin writing from
    */
    void writeData(byte dataBuffer[], int dataOffset)
            throws StorageException {
        int toCopy = dataLength;
        IntHolder offst = new IntHolder();
        int numChunks =
            (dataStart + toCopy - 1) / BtreeDataFile.BTREE_CHUNK_SIZE + 1;

        if (numChunks > chunks) {
            StorageException se = new StoragePersistentDataException("Number of chunks does not match.");
            Logger.getDefault().annotate(se, "Bad number of chunks: ----------------------");
            Logger.getDefault().annotate(se, "start chunk number: " + myChunkNum);
            Logger.getDefault().annotate(se, "#chunks: " + chunks + " computed #chunks: " + numChunks);
            Logger.getDefault().annotate(se, "dataLength: " + dataLength + " dataStart: " + dataStart);
            throw se;
        }

        CachedPage pages[] = owner.getChunks(myChunkNum, numChunks, offst);
        try {
            int pageNum = 0;
            int pageSize = pages[0].contents.length;
            int offset = offst.getValue() + dataStart;
            // Convert the chunk-relative offset into a page index and an in-page offset.
            while (offset >= pageSize) {
                pageNum++;
                offset -= pageSize;
            }
            // Copy page by page; after the first page each copy starts at offset 0.
            while (toCopy > 0) {
                int thisPage = Math.min(pageSize - offset, toCopy);
                pages[pageNum].setWritable();
                System.arraycopy(dataBuffer, dataOffset,
                                 pages[pageNum].contents, offset, thisPage);
                dataOffset += thisPage;
                toCopy -= thisPage;
                pageNum++;
                offset = 0;
            }
        }
        finally {
            for (int i = 0; i < pages.length; i++) {
                pages[i].unpin();
            }
        }
    }

    /** Add the data described by this extent to a CachedPageInputStream.
    * The pages of data are already in the cache.
    * @param strm stream to add pages to
    */
    void addToStream(CachedPageInputStream strm) throws StorageException {
        IntHolder offst = new IntHolder();
        int toAppend = getMyDataLength();
        int numChunks =
            (dataStart + toAppend - 1) / BtreeDataFile.BTREE_CHUNK_SIZE + 1;
        CachedPage[] pages = owner.getChunks(myChunkNum, numChunks, offst);

        if (numChunks > chunks) {
            StorageException se = new StoragePersistentDataException("Number of chunks does not match.");
            Logger.getDefault().annotate(se, "Bad number of chunks: ----------------------");
            Logger.getDefault().annotate(se, "start chunk number: " + myChunkNum);
            Logger.getDefault().annotate(se, "#chunks: " + chunks + " computed #chunks: " + numChunks);
            Logger.getDefault().annotate(se, "dataLength: " + dataLength + " dataStart: " + dataStart);
            throw se;
        }

        int pageNum = 0;
        int pageSize = pages[0].contents.length;
        int offset = offst.getValue() + dataStart;
        // Convert the chunk-relative offset into a page index and an in-page offset.
        while (offset >= pageSize) {
            pageNum++;
            offset -= pageSize;
        }

        // Add each remaining page's slice of data to the stream.
        for (; pageNum < pages.length; pageNum++) {
            int thisPage = Math.min(pageSize - offset, toAppend);
            strm.addPage(pages[pageNum], offset, thisPage);
            offset = 0;
            toAppend -= thisPage;
        }

        for (; pageNum < pages.length; pageNum++) {
            pages[pageNum].unpin();
        }

    }

    /** Get CRC of record's data
    * @return CRC
    */
    long getCRC() throws StorageException {
        CachedPageInputStream dstrm = new CachedPageInputStream();
        CheckedInputStream cis = null;
        try {
            try {
                addToStream(dstrm);
                cis = new CheckedInputStream(dstrm, new CRC32());
                while (cis.read() >= 0)
                    ;
                return cis.getChecksum().getValue();
            }
            finally {
                if (cis != null)
                    cis.close();
                else
                    dstrm.close();
            }
        }
        catch (IOException exc) {
            throw new StorageIOException(exc);
        }
    }

    /** Is the extent already full of data?
    * @return true if the extent has no room for more data
    */
    abstract boolean isMaximum();

    /** Dump extent as text (for debugging).
    * @param level bitmask of what to dump. See the superclass for the
    * meaning of the levels.
    * @param strm where to dump it to
    */
    void dump(int level, PrintWriter strm) throws StorageException {
        super.dump(level, strm);
        boolean dumpData = (level & DUMP_DATA) != 0;
        boolean showCheckSum = (level & DUMP_DATA_CHECKSUM) != 0;

        strm.println("" + dataLength + " data bytes");
        if (dumpData) {
            CachedPageInputStream dstrm = new CachedPageInputStream();
            try {
                try {
                    addToStream(dstrm);
                    if (dumpData) {
                        dumpBytesAsHex(dstrm, strm, "\t");
                        strm.println();
                    }
                }
                finally {
                    dstrm.close();
                }
            }
            catch (IOException exc) {
                throw new StorageIOException(exc);
            }
        }

        if (showCheckSum) {
            strm.println("Data checksum: " + getCRC());
            strm.println();
        }
    }
}
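
Both writeData and addToStream use the same arithmetic: the cache offset returned by getChunks plus dataStart is turned into a page index and an in-page offset, and the bytes are then copied page by page. Below is a minimal, stand-alone sketch of that arithmetic over plain byte arrays; the 4096-byte page size, the class and method names, and the data in main() are illustrative assumptions rather than values taken from BtreeDataFile or the page cache.

import java.util.Arrays;

/** Hypothetical sketch of the page/offset walk performed by writeData(). */
public class PageCopySketch {

    // Illustrative page size; in the real code it comes from CachedPage.contents.length.
    static final int PAGE_SIZE = 4096;

    /**
     * Copies data into an array of fixed-size pages, starting startOffset bytes
     * from the beginning of the first page and splitting at page boundaries.
     */
    static void copyIntoPages(byte[] data, byte[][] pages, int startOffset) {
        int pageNum = 0;
        int offset = startOffset;
        // Reduce the starting offset to a page index plus an in-page offset.
        while (offset >= PAGE_SIZE) {
            pageNum++;
            offset -= PAGE_SIZE;
        }
        int dataOffset = 0;
        int toCopy = data.length;
        while (toCopy > 0) {
            // Copy at most to the end of the current page, then continue at
            // offset 0 of the next page.
            int thisPage = Math.min(PAGE_SIZE - offset, toCopy);
            System.arraycopy(data, dataOffset, pages[pageNum], offset, thisPage);
            dataOffset += thisPage;
            toCopy -= thisPage;
            pageNum++;
            offset = 0;
        }
    }

    public static void main(String[] args) {
        byte[][] pages = { new byte[PAGE_SIZE], new byte[PAGE_SIZE] };
        byte[] data = new byte[100];
        Arrays.fill(data, (byte) 1);
        copyIntoPages(data, pages, PAGE_SIZE - 40); // the copy spans a page boundary
        System.out.println(pages[0][PAGE_SIZE - 1] + " " + pages[1][59]); // prints "1 1"
    }
}

Splitting each copy at the page boundary is what lets an extent span several cache pages without ever assembling the record in one contiguous buffer.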
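getCRC() computes the record checksum by draining the extent's bytes through a java.util.zip.CheckedInputStream wrapped around a CRC32 and then reading the accumulated value. The same pattern works for any InputStream; the stand-alone example below uses made-up sample data and try-with-resources in place of the explicit close() calls above.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;
import java.util.zip.CheckedInputStream;

public class CrcSketch {
    public static void main(String[] args) throws IOException {
        byte[] record = "example record data".getBytes(StandardCharsets.UTF_8);
        // Reading through CheckedInputStream updates the CRC32 as a side effect;
        // the bytes themselves are discarded, only the checksum is kept.
        try (CheckedInputStream cis =
                 new CheckedInputStream(new ByteArrayInputStream(record), new CRC32())) {
            while (cis.read() >= 0) {
                // drain the stream
            }
            System.out.println("CRC32: " + cis.getChecksum().getValue());
        }
    }
}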