/* ====================================================================
   Copyright 2002-2004 Apache Software Foundation

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
==================================================================== */

package org.apache.poi.hssf.record;

import org.apache.poi.util.BinaryTree;

import java.util.List;
import java.util.ArrayList;
import java.util.Map;

/**
 * This class handles serialization of SST records. It utilizes the record processor
 * class to write individual records. This has been refactored from the SSTRecord class.
 *
 * @author Glen Stampoultzis (glens at apache.org)
 */
class SSTSerializer
{

    // todo: make private again
    private List recordLengths;
    private BinaryTree strings;

    private int numStrings;
    private int numUniqueStrings;
    private SSTRecordHeader sstRecordHeader;

    /** Offsets from the beginning of the SST record (even across continuations) */
    int[] bucketAbsoluteOffsets;
    /** Offsets relative to the start of the current SST or continue record */
    int[] bucketRelativeOffsets;
    int startOfSST, startOfRecord;

    public SSTSerializer( List recordLengths, BinaryTree strings, int numStrings, int numUniqueStrings )
    {
        this.recordLengths = recordLengths;
        this.strings = strings;
        this.numStrings = numStrings;
        this.numUniqueStrings = numUniqueStrings;
        this.sstRecordHeader = new SSTRecordHeader( numStrings, numUniqueStrings );

        int infoRecs = ExtSSTRecord.getNumberOfInfoRecsForStrings( strings.size() );
        this.bucketAbsoluteOffsets = new int[infoRecs];
        this.bucketRelativeOffsets = new int[infoRecs];
    }
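
    // Illustrative note, added for context and hedged: assuming POI's
    // ExtSSTRecord constants (a default bucket size of 8 strings and a cap of
    // 128 buckets), the info-record count computed in the constructor is a
    // capped ceiling division, e.g.
    //     20 strings   -> ceil(20 / 8)   = 3 info records
    //     2000 strings -> ceil(2000 / 8) = 250, capped to 128 info records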

    /**
     * Fills the given byte array with an SST record and any
     * required Continue records, ready to be written out.
     * <p>
     * If an SST record and any subsequent Continue records are read
     * in to create this instance, this method should produce a byte
     * array that is identical to the byte array produced by
     * concatenating the input records' data.
     *
     * @return the record size, i.e. the number of bytes written
     */
    public int serialize( int record_size, int offset, byte[] data )
    {
        int record_length_index = 0;

        if ( calculateUnicodeSize() > SSTRecord.MAX_DATA_SPACE )
            serializeLargeRecord( record_size, record_length_index, data, offset );
        else
            serializeSingleSSTRecord( data, offset, record_length_index );
        return record_size;
    }
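
    // Note on the threshold above (an assumption, not stated in this file):
    // a BIFF8 record body holds at most 8,224 bytes, and the SST header (the
    // total and unique string counts, four bytes each) consumes 8 of them, so
    // SSTRecord.MAX_DATA_SPACE is presumably 8,216 bytes of string data before
    // Continue records become necessary.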

    /**
     * Calculates the total unicode size for all the strings.
     *
     * @return the total size.
     */
    public static int calculateUnicodeSize( Map strings )
    {
        int retval = 0;

        for ( int k = 0; k < strings.size(); k++ )
        {
            retval += getUnicodeString( strings, k ).getRecordSize();
        }
        return retval;
    }

    public int calculateUnicodeSize()
    {
        return calculateUnicodeSize( strings );
    }

    /**
     * This case is chosen when an SST record does not spill over into a continue record.
     */
    private void serializeSingleSSTRecord( byte[] data, int offset, int record_length_index )
    {
        int len = ( (Integer) recordLengths.get( record_length_index ) ).intValue();
        int recordSize = len - SSTRecord.STD_RECORD_OVERHEAD;
        sstRecordHeader.writeSSTHeader( data, offset, recordSize );
        int pos = SSTRecord.SST_RECORD_OVERHEAD;

        for ( int k = 0; k < strings.size(); k++ )
        {
            if ( k % ExtSSTRecord.DEFAULT_BUCKET_SIZE == 0 )
            {
                int index = k / ExtSSTRecord.DEFAULT_BUCKET_SIZE;
                if ( index < ExtSSTRecord.MAX_BUCKETS )
                {
                    // Excel only indexes the first 128 buckets.
                    bucketAbsoluteOffsets[index] = pos;
                    bucketRelativeOffsets[index] = pos;
                }
            }
            UnicodeString unistr = getUnicodeString( k );
            System.arraycopy( unistr.serialize(), 0, data, pos + offset, unistr.getRecordSize() );
            pos += unistr.getRecordSize();
        }
    }
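
    // In the single-record case above there are no continuations, so a
    // bucket's offset from the start of the SST data equals its offset from
    // the start of the current record; that is why both offset arrays are
    // assigned the same value of pos.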

    /**
     * Large records are serialized to an SST record and one or more CONTINUE
     * records. Joy. They have the special characteristic that the option field
     * can change when a single string is split across into a CONTINUE record.
     */
    private void serializeLargeRecord( int record_size, int record_length_index, byte[] buffer, int offset )
    {

        startOfSST = offset;

        byte[] stringReminant = null;
        int stringIndex = 0;
        boolean lastneedcontinue = false;
        boolean first_record = true;
        int totalWritten = 0;

        while ( totalWritten != record_size )
        {
            // Total record length, including excel record header and sst/continue header
            final int recordLength = ( (Integer) recordLengths.get( record_length_index++ ) ).intValue();
            // Total available data length (minus the excel record header size)
            final int recordDataLength = recordLength - 4;
            RecordProcessor recordProcessor = new RecordProcessor( buffer,
                    recordDataLength, numStrings, numUniqueStrings );

            // write the appropriate header
            startOfRecord = offset + totalWritten;
            recordProcessor.writeRecordHeader( offset, totalWritten, recordDataLength, first_record );
            first_record = false;

            // now, write the rest of the data into the current
            // record space
            if ( lastneedcontinue )
            {
                lastneedcontinue = stringReminant.length > recordProcessor.getAvailable();
                // the last string in the previous record was not written out completely
                stringReminant = recordProcessor.writeStringRemainder( lastneedcontinue,
                        stringReminant, offset, totalWritten );
                // check to see if it is still not written out completely
                if ( lastneedcontinue )
                {
                    totalWritten += recordLength;
                    continue;
                }
            }

            // last string's remnant, if any, is cleaned up as best as can be done ... now let's try and write
            // some more strings
            for ( ; stringIndex < strings.size(); stringIndex++ )
            {
                UnicodeString unistr = getUnicodeString( stringIndex );

                if ( stringIndex % ExtSSTRecord.DEFAULT_BUCKET_SIZE == 0 )
                {
                    int index = stringIndex / ExtSSTRecord.DEFAULT_BUCKET_SIZE;
                    if ( index < ExtSSTRecord.MAX_BUCKETS )
                    {
                        bucketAbsoluteOffsets[index] = offset + totalWritten +
                                recordProcessor.getRecordOffset() - startOfSST;
                        bucketRelativeOffsets[index] = offset + totalWritten +
                                recordProcessor.getRecordOffset() - startOfRecord;
                    }
                }

                if ( unistr.getRecordSize() <= recordProcessor.getAvailable() )
                {
                    recordProcessor.writeWholeString( unistr, offset, totalWritten );
                }
                else
                {
                    // can't write the entire string out
                    if ( recordProcessor.getAvailable() >= SSTRecord.STRING_MINIMAL_OVERHEAD )
                    {
                        // we can write some of it
                        stringReminant = recordProcessor.writePartString( unistr, offset, totalWritten );
                        lastneedcontinue = true;
                        stringIndex++;
                    }
                    break;
                }
            }
            totalWritten += recordLength;
        }
    }
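
    // Background for the "option field" remark in the javadoc above (general
    // BIFF8 behaviour, not spelled out in this file): when a string spills
    // into a CONTINUE record, the continued fragment starts with a fresh
    // option-flags byte, so the remainder may switch between compressed and
    // uncompressed characters; writeStringRemainder and writePartString on
    // RecordProcessor are presumed to handle that rewriting.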

    private UnicodeString getUnicodeString( int index )
    {
        return getUnicodeString( strings, index );
    }

    private static UnicodeString getUnicodeString( Map strings, int index )
    {
        Integer intunipos = new Integer( index );
        return ( (UnicodeString) strings.get( intunipos ) );
    }
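
    // BinaryTree (org.apache.poi.util) is a map kept ordered by both keys and
    // values; here it serves as an Integer-index -> UnicodeString lookup, and
    // its reverse value -> key lookup is presumably what lets SSTRecord find
    // the index of an already-stored string when deduplicating.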

    public int getRecordSize()
    {
        SSTRecordSizeCalculator calculator = new SSTRecordSizeCalculator( strings );
        int recordSize = calculator.getRecordSize();
        recordLengths = calculator.getRecordLengths();
        return recordSize;
    }

    public List getRecordLengths()
    {
        return recordLengths;
    }

    public int[] getBucketAbsoluteOffsets()
    {
        return bucketAbsoluteOffsets;
    }

    public int[] getBucketRelativeOffsets()
    {
        return bucketRelativeOffsets;
    }
}
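
// A hedged usage sketch, not part of this file: SSTRecord is presumed to
// drive the serializer roughly as follows. The BinaryTree maps Integer
// indices to UnicodeString values; populating it is elided because the
// UnicodeString construction API varies between POI versions, and numStrings
// (the total count, duplicates included) is assumed known to the caller.
//
//     BinaryTree strings = ...;                     // index -> UnicodeString
//     SSTSerializer serializer = new SSTSerializer(
//             new ArrayList(), strings, numStrings, strings.size() );
//     int size = serializer.getRecordSize();        // also computes recordLengths
//     byte[] data = new byte[size];
//     serializer.serialize( size, 0, data );        // SST plus any Continue records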