KickJava   Java API By Example, From Geeks To Geeks.

Java > Open Source Codes > org > apache > derby > impl > store > raw > data > StoredPage


1 /*
2
3    Derby - Class org.apache.derby.impl.store.raw.data.StoredPage
4
5    Licensed to the Apache Software Foundation (ASF) under one or more
6    contributor license agreements. See the NOTICE file distributed with
7    this work for additional information regarding copyright ownership.
8    The ASF licenses this file to you under the Apache License, Version 2.0
9    (the "License"); you may not use this file except in compliance with
10    the License. You may obtain a copy of the License at
11
12       http://www.apache.org/licenses/LICENSE-2.0
13
14    Unless required by applicable law or agreed to in writing, software
15    distributed under the License is distributed on an "AS IS" BASIS,
16    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17    See the License for the specific language governing permissions and
18    limitations under the License.
19
20 */

21
package org.apache.derby.impl.store.raw.data;

import org.apache.derby.iapi.reference.SQLState;

import org.apache.derby.impl.store.raw.data.BasePage;
import org.apache.derby.impl.store.raw.data.LongColumnException;
import org.apache.derby.impl.store.raw.data.OverflowInputStream;
import org.apache.derby.impl.store.raw.data.PageVersion;
import org.apache.derby.impl.store.raw.data.RecordId;
import org.apache.derby.impl.store.raw.data.RawField;
import org.apache.derby.impl.store.raw.data.ReclaimSpace;
import org.apache.derby.impl.store.raw.data.StoredFieldHeader;
import org.apache.derby.impl.store.raw.data.StoredRecordHeader;

import org.apache.derby.iapi.services.io.ArrayInputStream;
import org.apache.derby.iapi.services.io.ArrayOutputStream;
import org.apache.derby.iapi.services.io.CompressedNumber;
import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;
import org.apache.derby.iapi.services.io.ErrorObjectInput;
import org.apache.derby.iapi.services.io.FormatIdInputStream;
import org.apache.derby.iapi.services.io.FormatIdOutputStream;
import org.apache.derby.iapi.services.io.FormatIdUtil;
import org.apache.derby.iapi.services.io.FormatableBitSet;
import org.apache.derby.iapi.services.io.LimitObjectInput;
import org.apache.derby.iapi.services.io.StoredFormatIds;
import org.apache.derby.iapi.services.io.StreamStorable;
import org.apache.derby.iapi.services.io.TypedFormat;
import org.apache.derby.iapi.services.sanity.SanityManager;

import org.apache.derby.iapi.store.access.conglomerate.LogicalUndo;
import org.apache.derby.iapi.store.access.Qualifier;
import org.apache.derby.iapi.store.access.RowUtil;

import org.apache.derby.iapi.store.raw.ContainerHandle;
import org.apache.derby.iapi.store.raw.FetchDescriptor;
import org.apache.derby.iapi.store.raw.Page;
import org.apache.derby.iapi.store.raw.PageKey;
import org.apache.derby.iapi.store.raw.PageTimeStamp;
import org.apache.derby.iapi.store.raw.RawStoreFactory;
import org.apache.derby.iapi.store.raw.RecordHandle;
import org.apache.derby.iapi.store.raw.log.LogInstant;
import org.apache.derby.iapi.store.raw.xact.RawTransaction;

import org.apache.derby.iapi.error.StandardException;

import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.types.Orderable;

import org.apache.derby.iapi.util.ByteArray;

import java.util.zip.CRC32;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.Externalizable;
import java.io.IOException;
import java.io.InputStream;
import java.io.InvalidClassException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.OutputStream;
92
93 /**
94     StoredPage is a sub class of CachedPage that stores page data in a
95     fixed size byte array and is designed to be written out to a file
96     through a DataInput/DataOutput interface. A StoredPage can exist
97     in its clean or dirty state without the FileContainer it was created
98     from being in memory.
99
100   <P><B>Page Format</B><BR>
101   The page is broken into five sections
102   <PRE>
103   +----------+-------------+-------------------+-------------------+----------+
104   | formatId | page header | records | slot offset table | checksum |
105   +----------+-------------+-------------------+-------------------+----------+
106   </PRE>
107   <BR><B>FormatId</B><BR>
108   The formatId is a 4 bytes array, it contains the format Id of this page.
109   <BR><B>Page Header</B><BR>
110   The page header is a fixed size, 56 bytes
111   <PRE>
112   1 byte boolean is page an overflow page
113   1 byte byte page status (a field maintained in base page)
114   8 bytes long pageVersion (a field maintained in base page)
115   2 bytes unsigned short number of slots in slot offset table
116   4 bytes integer next record identifier
117   4 bytes integer generation number of this page (Future Use)
118   4 bytes integer previous generation of this page (Future Use)
119   8 bytes bipLocation the location of the beforeimage page (Future Use)
120   2 bytes unsigned short number of deleted rows on page. (new release 2.0)
121   2 bytes unsigned short % of the page to keep free for updates
122   2 bytes short spare for future use
123   4 bytes long spare for future use (encryption uses to write
124                                                   random bytes here).
125   8 bytes long spare for future use
126   8 bytes long spare for future use
127
128   </PRE>
129
  Note that spare space is guaranteed to be written with "0", so
  any future use of a spare field should either not use "0" as a valid
  data item, or pick 0 as the valid default value, so that an on-the-fly
  upgrade can assume that 0 means the field was never assigned.
134
135   <BR><B>Records</B>
136   The records section contains zero or more records, the format of each record
137   follows.
138   minimumRecordSize is the minimum user record size, excluding the space we
139   use for the record header and field headers. When a record is inserted, it
140   is stored in a space at least as large as the sum of the minimumRecordSize
141   and total header size.
142         For example,
143             If minimumRecordSize is 10 bytes,
144             the user record is 7 bytes,
145             we used 5 bytes for record and field headers,
146             this record will take (10 + 5) bytes of space, extra 3 bytes is
147             put into reserve.
148
149             If minimumRecordSize is 10 bytes,
150             user record is 17 bytes,
151             we used 5 bytes for record and field headers,
152             this record will take (17 + 5) bytes of space, no reserve space
153             here.
154
155   minimumRecordSize is defined by user on per container basis.
156   The default for minimumRecordSize is set to 1.
157
158   This implementation always keeps occupied bytes at the low end of the record
159   section. Thus removing (purging) a record moves all other records down, and
160   their slots are also moved down.
161   A page has no empty slot (an empty page has no slot)
162
163    <BR><B>Record & Field Format</B>
164
165   Record Header format is defined in the StoredRecordHeader class.
166   
167 <PRE>
168   <BR><B>Fields</B>
169
170   1 byte Boolean - is null, if true no more data follows.
171   4 bytes Integer - length of field that follows (excludes these four bytes).
172
173   StoredPage will use the static method provided by StoredFieldHeader
174   to read/write field status and field data length.
175
176   Field Header format is defined in the StoredFieldHeader class.
177   <data>
178
179   </PRE>
180     <BR><B>Slot Offset Table</B><BR>
181     The slot offset table is a table of 6 or 12 bytes per record, depending on
182     the pageSize being less or greater than 64K:
183     2 bytes (unsigned short) or 4 bytes (int) page offset for the record that
184     is assigned to the slot, and 2 bytes (unsigned short) or 4 bytes (int)
185     for the length of the record on this page.
186     2 bytes (unsigned short) or 4 bytes (int) for the length of the reserved
187     number of bytes for this record on this page.
188     First slot is slot 0. The slot table grows backwards. Slots are never
189     left empty.
190     <BR><B>Checksum</B><BR>
    8 bytes of a java.util.zip.CRC32 checksum of the entire page's contents
    without the 8 bytes representing the checksum.
193
194     <P><B>Page Access</B>
195     The page data is accessed in this class by one of three methods.
196     <OL>
197     <LI>As a byte array using pageData (field in cachedPage). This is the
198     fastest.
199     <LI>As an ArrayInputStream (rawDataIn) and ArrayOutputStream (rawDataOut),
200     this is used to set limits on any one reading the page logically.
201     <LI>Logically through rawDataIn (ArrayInputStream) and
202     logicalDataOut (FormatIdOutputStream), this provides the methods to write
203     logical data (e.g. booleans and integers etc.) and the ObjectInput
204     and ObjectOutput interfaces for DataValueDescriptor's. These logical
205     streams are constructed using the array streams.
206     </OL>
207
208     @see java.util.zip.CRC32
209     @see ArrayInputStream
210     @see ArrayOutputStream
211  **/

212
213
214
215     /**************************************************************************
216      * Public Methods of This class:
217      **************************************************************************
218      */

219
220     /**************************************************************************
221      * Public Methods of XXXX class:
222      **************************************************************************
223      */

224
225 public class StoredPage extends CachedPage
226 {
227     /**************************************************************************
228      * static final Fields of the class
229      **************************************************************************
230      */

231
232     /*
233      * typed format
234      */

235
236     public static final int FORMAT_NUMBER =
237         StoredFormatIds.RAW_STORE_STORED_PAGE;
238
239     /**
240      * Return my format identifier.
241      **/

242     public int getTypeFormatId()
243     {
244         return StoredFormatIds.RAW_STORE_STORED_PAGE;
245     }
246
247
248     /**
249      * Constants used to find different portions of data on the page.
250      * <p>
251      * The page is laid out as follows:
252      * The page is broken into five sections
253      * +----------+-------------+---------+-------------------+----------+
254      * | formatId | page header | records | slot offset table | checksum |
255      * +----------+-------------+---------+-------------------+----------+
256      *
257      * offset size section
258      * ------ ------------------- --------------------------
259      * 0 PAGE_FORMAT_ID_SIZE formatId
260      * PAGE_FORMAT_ID_SIZE: PAGE_HEADER_SIZE (56) page header
261      * RECORD_SPACE_OFFSET: variable records
262      **/

263
264
265     /**
266      * Start of page, formatId must fit in 4 bytes.
267      * <p>
268      * where the page header starts - page format is mandated by cached page
269      **/

270     protected static final int PAGE_HEADER_OFFSET = PAGE_FORMAT_ID_SIZE;
271
272
273     /**
274      * Fixed size of the page header
275      **/

276     protected static final int PAGE_HEADER_SIZE = 56;
277
278
279     /**
280         Start of the record storage area
281     */

282     /**
283      * Start of the record storage area.
284      * <p>
285      * Note: a subclass may change the start of the record storage area.
286      * Don't always count on this number.
287      **/

288     protected static final int RECORD_SPACE_OFFSET =
289         PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE;
290
291     /**
292      * offset of the page version number
293      **/

294     protected static final int PAGE_VERSION_OFFSET = PAGE_HEADER_OFFSET + 2;
295
296     /**
297      * SMALL_SLOT_SIZE are for pages smaller than 64K,
298      * LARGE_SLOT_SIZE is for pages bigger than 64K.
299      **/

300     protected static final int SMALL_SLOT_SIZE = 2;
301     protected static final int LARGE_SLOT_SIZE = 4;
302
303     /**
304      * Size of the checksum stored on the page.
305      *
306      * The checksum is stored in the last 8 bytes of the page, the slot table
307      * grows backward up the page starting at the end of the page just before
308      * the checksum.
309      **/

310     protected static final int CHECKSUM_SIZE = 8;
311
312     /**
313      * OVERFLOW_POINTER_SIZE - Number of bytes to reserve for overflow pointer
314      *
315      * The overflow pointer is the pointer that the takes the place of the
316      * last column of a row if the row can't fit on the page. The pointer
317      * then points to another page where the next column of the row can be
318      * found. The overflow pointer can be bigger than a row, so when
319      * overflowing a row the code must overflow enough columns so that there
320      * is enough free space to write the row. Note this means that the
321      * minimum space a row can take on a page must allow for at least the
322      * size of the overflow pointers so that if the row is updated it can
323      * write the over flow pointer.
324      *
325      **/

326     protected static final int OVERFLOW_POINTER_SIZE = 12;
327
328     /**
329      * OVERFLOW_PTR_FIELD_SIZE - Number of bytes of an overflow field
330      *
331      * This is the length to reserve for either an column or row overflow
332      * pointer field. It includes the size of the field header plus the
333      * maxium length of the overflow pointer (it could be shorter due to
334      * compressed storage).
335      *
336      * The calcualtion is:
337      *
338      * OVERFLOW_PTR_FIELD_SIZE =
339      * OVERFLOW_POINTER_SIZE +
340      * sizeof(status byte) +
341      * sizeof(field length field for a field which is just an overflow ptr)
342      *
343      *
344      **/

345     protected static final int OVERFLOW_PTR_FIELD_SIZE =
346         OVERFLOW_POINTER_SIZE + 1 + 1;
347
348     /**
349      * In memory buffer used as scratch space for streaming columns.
350      **/

351     ByteHolder bh = null;
352
353     /**************************************************************************
354      * Fields of the class
355      **************************************************************************
356      */

357
358
359     /**
360      * Constants used in call to logColumn.
361      * <p>
362      * Action taken in this routine is determined by the kind of column as
363      * specified in the columnFlag:
364      * COLUMN_NONE - the column is insignificant
365      * COLUMN_FIRST - this is the first column in a logRow() call
366      * COLUMN_LONG - this is a known long column, therefore we will
367      * store part of the column on the current page and
368      * overflow the rest if necessary.
369      **/

370     protected static final int COLUMN_NONE = 0;
371     protected static final int COLUMN_FIRST = 1;
372     protected static final int COLUMN_LONG = 2;
373
374
375     /**
376      * maxFieldSize is a worst case calculation for the size of a record
377      * on an empty page, with a single field, but still allow room for
378      * an overflow pointer if another field is to be added. See initSpace().
379      * maxFieldSize is a worst case calculation for the size of a record
380      *
381      * This is used as the threshold for a long column.
382      *
383      * maxFieldSize =
384      * totalSpace * (1 - spareSpace/100) -
385      * slotEntrySize * - 16 - OVERFLOW_POINTER_SIZE;
386      **/

387     protected int maxFieldSize;
388
389
390     /**
391      * The page header is a fixed size, 56 bytes, following are variables used
392      * to access the fields in the header:
393      * <p>
394      * 1 byte boolean isOverflowPage is page an overflow page
395      * 1 byte byte pageStatus page status (field in base page)
396      * 8 bytes long pageVersion page version (field in base page)
397      * 2 bytes ushort slotsInUse number of slots in slot offset table
398      * 4 bytes integer nextId next record identifier
399      * 4 bytes integer generation generation number of this page(FUTURE USE)
400      * 4 bytes integer prevGeneration previous generation of page (FUTURE USE)
401      * 8 bytes long bipLocation the location of the BI page (FUTURE USE)
402      * 2 bytes ushort deletedRowCount number of deleted rows on page.(rel 2.0)
403      * 2 bytes long spare for future use
404      * 4 bytes long spare (encryption writes random bytes)
405      * 8 bytes long spare for future use
406      * 8 bytes long spare for future use
407      *
408      * Note that spare space has been guaranteed to be writen with "0", so
409      * that future use of field should not either not use "0" as a valid data
410      * item or pick 0 as a valid default value so that on the fly upgrade can
411      * assume that 0 means field was never assigned.
412      *
413      **/

414     private boolean isOverflowPage; // is page an overflow page?
415
private int slotsInUse; // number of slots in slot offset table.
416
private int nextId; // next record identifier
417
private int generation; // (Future Use) generation number of this page
418
private int prevGeneration; // (Future Use) previous generation of page
419
private long bipLocation; // (Future Use) the location of the BI page
420
private int deletedRowCount; // number of deleted rows on page.
421

422     /**
423      * Is the header in the byte array out of date wrt the fields.
424      * <p>
425      * this field must be set to true whenever one of the above header fields
426      * is modified. Ie any of (isOverflowPage, slotsInUse, nextId, generation,
427      * prevGeneration, bipLocation, deletedRowCount)
428      **/

429     private boolean headerOutOfDate;
430
431     /**
432      * holder for the checksum.
433      **/

434     private CRC32 JavaDoc checksum;
435
436     /**
437      * Minimum space to reserve for record portion length of row.
438      * <p>
439      * minimumRecordSize is stored in the container handle. It is used to
440      * reserved minimum space for recordPortionLength. Default is 1. To
441      * get the value from the container handle:
442      * myContainer.getMinimumRecordSize();
443      *
444      * minimumRecordSize is the minimum user record size, excluding the space we
445      * use for the record header and field headers. When a record is inserted,
446      * it is stored in a space at least as large as the sum of the
447      * minimumRecordSize and total header size.
448      *
449      * For example,
450      * If minimumRecordSize is 10 bytes,
451      * the user record is 7 bytes,
452      * we used 5 bytes for record and field headers,
453      * this record will take (10 + 5) bytes of space, extra 3 bytes is
454      * put into reserve.
455      *
456      * If minimumRecordSize is 10 bytes,
457      * user record is 17 bytes,
458      * we used 5 bytes for record and field headers,
459      * this record will take (17 + 5) bytes of space, no reserve space
460      * here.
461      *
462      * minimumRecordSize is defined by user on per container basis.
463      * The default for minimumRecordSize is set to 1.
464      *
465      **/

466     protected int minimumRecordSize;
467
468     /**
469      * scratch variable used to keep track of the total user size for the row.
470      * the information is used by logRow to maintain minimumRecordSize
471      * on Page. minimumRecordSize is only considered for main data pages,
472      * therefore, the page must be latched during an insert operation.
473      **/

474     private int userRowSize;
475     
476     /**
477      * slot field and slot entry size.
478      * <p>
479      * The size of these fields is dependant on the page size.
480      * These 2 variables should be set when pageSize is determined, and should
481      * not be changed for that page.
482      *
483      * Each slot entry contains 3 fields (slotOffet, recordPortionLength and
484      * reservedSpace) for the record the slot is pointing to.
485      * slotFieldSize is the size for each of the slot field.
486      * slotEntrySize is the total space used for a single slot entry.
487      **/

488     private int slotFieldSize;
489     private int slotEntrySize;
490
491     /**
492      * Offset of the first entry in the slot table.
493      * <p>
494      * Offset table is located at end of page, just before checksum. It
495      * grows backward as an array from this point toward the middle of the
496      * page.
497      * <p>
498      * slotTableOffsetToFirstEntry is the offset to the beginning of the
499      * first entry (slot[0]) in the slot table. This allows the following
500      * math to get to the offset of N'th entry in the slot table:
501      *
502      * offset of slot[N] = slotTableOffsetToFirstEntry + (N * slotEntrySize)
503      **/

504     private int slotTableOffsetToFirstEntry;
505
506     /**
507      * Offset of the record length entry in the 1st slot table entry.
508      * <p>
509      * Offset table is located at end of page, just before checksum. It
510      * grows backward as an array from this point toward the middle of the
511      * page. The record length is stored as the second "field" of the
512      * slot table entry.
513      * <p>
514      * slotTableOffsetToFirstRecordLengthField is the offset to the beginning
515      * of the record length field in the first entry (slot[0]) in the slot
516      * table. This allows the following
517      * math to get to the record length field of N'th entry in the slot table:
518      *
519      * offset of record length of slot[N] slot entry =
520      * slotTableOffsetToFirstRecordLengthField + (N * slotEntrySize)
521      **/

522     private int slotTableOffsetToFirstRecordLengthField;
523
524
525     /**
526      * Offset of the reserved space length entry in the 1st slot table entry.
527      * <p>
528      * Offset table is located at end of page, just before checksum. It
529      * grows backward as an array from this point toward the middle of the
530      * page. The reserved space length is stored as the third "field" of the
531      * slot table entry.
532      * <p>
533      * slotTableOffsetToFirstReservedSpaceField is the offset to the beginning
534      * of the reserved space field in the first entry (slot[0]) in the slot
535      * table. This allows the following
536      * math to get to the reserved space field of N'th entry in the slot table:
537      *
538      * offset of reserved space of slot[N] slot entry =
539      * slotTableOffsetToFirstReservedSpaceField + (N * slotEntrySize)
540      **/

541     private int slotTableOffsetToFirstReservedSpaceField;
542
543     /**
544      * total usable space on a page.
545      * <p>
546      * This is the space not taken by page hdr, page table, and existing
547      * slot entries/rows.
548      **/

549     protected int totalSpace; // total usable space on a page
550

551     // freeSpace and firstFreeByte are initliazed to a minimum value.
552
protected int freeSpace = Integer.MIN_VALUE; // free space on the page
553
private int firstFreeByte = Integer.MIN_VALUE; // 1st free byte on page
554

555
556     /**
557      * % of page to keep free for updates.
558      * <p>
559      * How much of a head page should be reserved as "free" so that the space
560      * can be used by update which expands the row without needing to overflow
561      * it. 1 means save 1% of the free space for expansion.
562      **/

563     protected int spareSpace;
564
565     /**
566      * Scratch variable used when you need a overflowRecordHeader. Declared
567      * globally so that object is only allocated once per page.
568      **/

569     private StoredRecordHeader overflowRecordHeader;
570
571     /**
572      * Input streams used to read/write bytes to/from the page byte array.
573      **/

574     protected ArrayInputStream rawDataIn;
575     protected ArrayOutputStream rawDataOut;
576     protected FormatIdOutputStream logicalDataOut;
577
578
579     /**************************************************************************
580      * Constructors for This class:
581      **************************************************************************
582      */

583
584
585     /**
586      * Simple no-arg constructor for StoredPage.
587      **/

588     public StoredPage()
589     {
590         super();
591     }
592
593     /**************************************************************************
594      * Private/Protected methods of This class:
595      **************************************************************************
596      */

597
598     /**
599      * get scratch space for over flow record header.
600      * <p>
601      *
602      * @exception StandardException Standard exception policy.
603      **/

604     
605     private StoredRecordHeader getOverFlowRecordHeader()
606         throws StandardException
607     {
608         return(
609             overflowRecordHeader != null ?
610                 overflowRecordHeader :
611                 (overflowRecordHeader = new StoredRecordHeader()));
612     }
613
614     /**
615      * Initialize the StoredPage.
616      * <p>
617      * Initialize the object, ie. perform work normally perfomed in constructor.
618      * Called by setIdentity() and createIdentity() - the Cacheable interfaces
619      * which are used to move a page in/out of cache.
620      **/

621     protected void initialize()
622     {
623         super.initialize();
624
625         if (rawDataIn == null)
626         {
627             rawDataIn = new ArrayInputStream();
628             checksum = new CRC32 JavaDoc();
629         }
630
631         if (pageData != null)
632             rawDataIn.setData(pageData);
633     }
634
635
636     /**
637      * Create the output streams.
638      * <p>
639      * Create the output streams, these are created on demand
640      * to avoid creating unrequired objects for pages that are
641      * never modified during their lifetime in the cache.
642      * <p>
643      *
644      * @exception StandardException Standard exception policy.
645      **/

646     private void createOutStreams()
647     {
648         rawDataOut = new ArrayOutputStream();
649         rawDataOut.setData(pageData);
650
651         logicalDataOut = new FormatIdOutputStream(rawDataOut);
652     }
653
654     /**
655      * Tie the logical output stream to a passed in OutputStream.
656      * <p>
657      * Tie the logical output stream to a passed in OutputStream with
658      * no limit as to the number of bytes that can be written.
659      **/

660     private void setOutputStream(OutputStream out)
661     {
662         if (rawDataOut == null)
663             createOutStreams();
664
665         logicalDataOut.setOutput(out);
666     }
667
668     /**
669      * Reset the logical output stream.
670      * <p>
671      * Reset the logical output stream (logicalDataOut) to be attached
672      * to the page array stream as is the norm, no limits are placed
673      * on any writes.
674      *
675      **/

676     private void resetOutputStream()
677     {
678
679         logicalDataOut.setOutput(rawDataOut);
680     }
681
682     /**************************************************************************
683      * Protected Methods of CachedPage class: (create, read and write a page.)
684      **************************************************************************
685      */

686
687     /**
688      * use this passed in page buffer as this object's page data.
689      * <p>
690      * The page content may not have been read in from disk yet.
691      * For pagesize smaller than 64K:
692      * Size of the record offset stored in a slot (unsigned short)
693      * Size of the record portion length stored in a slot (unsigned short)
694      * Size of the record portion length stored in a slot (unsigned short)
695      * For pagesize greater than 64K, but less than 2gig:
696      * Size of the record offset stored in a slot (int)
697      * Size of the record portion length stored in a slot (int)
698      * Size of the record portion length stored in a slot (int)
699      * <p>
700      *
701      * @param pageBuffer The array of bytes to use as the page buffer.
702      **/

703     protected void usePageBuffer(byte[] pageBuffer)
704     {
705         pageData = pageBuffer;
706
707         int pageSize = pageData.length;
708         if (rawDataIn != null)
709             rawDataIn.setData(pageData);
710
711         initSpace();
712
713         if (pageSize >= 65536)
714             slotFieldSize = LARGE_SLOT_SIZE;
715         else
716             slotFieldSize = SMALL_SLOT_SIZE;
717         
718         slotEntrySize = 3 * slotFieldSize;
719
720         // offset of slot table entry[0]
721
slotTableOffsetToFirstEntry =
722             (pageSize - CHECKSUM_SIZE - slotEntrySize);
723
724         // offset of record length field in slot table entry[0]
725
slotTableOffsetToFirstRecordLengthField =
726             slotTableOffsetToFirstEntry + slotFieldSize;
727
728         // offset of reserved space field in slot table entry[0]
729
slotTableOffsetToFirstReservedSpaceField =
730             slotTableOffsetToFirstEntry + (2 * slotFieldSize);
731
732         if (rawDataOut != null)
733             rawDataOut.setData(pageData);
734     }
735
736
737     /**
738      * Create a new StoredPage.
739      * <p>
740      * Make this object represent a new page (ie. a page that never existed
741      * before, as opposed to reading in an existing page from disk).
742      * <p>
743      *
744      * @param newIdentity The key describing page (segment,container,page).
745      * @param args information stored about the page, once in the
746      * container header and passed in through the array.
747      *
748      * @exception StandardException Standard exception policy.
749      **/

750     protected void createPage(
751     PageKey newIdentity,
752     int[] args)
753          throws StandardException
754     {
755         // arg[0] is the formatId of the page
756
// arg[1] is whether to sync the page to disk or not
757

758         int pageSize = args[2];
759         spareSpace = args[3];
760         minimumRecordSize = args[4];
761
762         setPageArray(pageSize);
763
764         cleanPage(); // clean up the page array
765

766         setPageVersion(0); // page is being created for the first time
767

768         nextId = RecordHandle.FIRST_RECORD_ID; // first record Id
769
generation = 0;
770         prevGeneration = 0; // there is no previous generation
771
bipLocation = 0L;
772
773         createOutStreams();
774     }
775
776     /**
777      * Initialize the page from values in the page buffer.
778      * <p>
779      * Initialize in memory structure using the buffer in pageData. This
780      * is how a StoredPage object is intialized to represent page read in
781      * from disk.
782      * <p>
783      *
784      * @param myContainer The container to read the page in from.
785      * @param newIdentity The key representing page being read in (segment,
786      * container, page number)
787      *
788      * @exception StandardException If the page cannot be read correctly,
789      * or is inconsistent.
790      **/

    protected void initFromData(
    FileContainer   myContainer,
    PageKey         newIdentity)
         throws StandardException
    {
        if (myContainer != null)
        {
            // read in info about page stored once in the container header.
            spareSpace        = myContainer.getSpareSpace();
            minimumRecordSize = myContainer.getMinimumRecordSize();
        }

        // if myContainer is null, assume spareSpace and minimumRecordSize
        // are the same as the values already held by this object.  We would
        // only call initFromData with a null container after a restore.

        try
        {
            // decode the fixed page header and rebuild the slot table from
            // the raw bytes in pageData
            readPageHeader();
            initSlotTable();
        }
        catch (IOException ioe)
        {
            // i/o methods on the byte array have thrown an IOException
            throw dataFactory.markCorrupt(
                StandardException.newException(
                    SQLState.DATA_CORRUPT_PAGE, ioe, newIdentity));
        }

        try
        {
            validateChecksum(newIdentity);
        }
        catch (StandardException se)
        {
            if (se.getMessageId().equals(SQLState.FILE_BAD_CHECKSUM))
            {
                // it is remotely possible that the disk transfer got garbled,
                // i.e., the page is actually fine on disk but the version we
                // got has some rubbish on it.  Double check by re-reading the
                // page from the container.
                int pagesize = getPageSize();
                byte[] corruptPage = pageData;
                pageData = null;    // clear this

                // set up the new page array
                setPageArray(pagesize);

                try
                {
                    myContainer.readPage(newIdentity.getPageNumber(), pageData);
                }
                catch (IOException ioe)
                {
                    throw dataFactory.markCorrupt(
                        StandardException.newException(
                            SQLState.DATA_CORRUPT_PAGE, ioe, newIdentity));
                }

                if (SanityManager.DEBUG)
                {
                    // clear the test flag so validateChecksum below does not
                    // artificially fail again
                    SanityManager.DEBUG_CLEAR("TEST_BAD_CHECKSUM");
                }

                // see if this read confirms the checksum error
                try
                {
                    validateChecksum(newIdentity);
                }
                catch (StandardException sse)
                {
                    // really bad - the second read is also corrupt, so the
                    // page on disk is genuinely damaged
                    throw dataFactory.markCorrupt(se);
                }

                // If we got here, this means the first read is bad but the
                // second read is good.  This could be due to disk I/O error or
                // a bug in the way the file pointer is mis-managed.
                String firstImage = pagedataToHexDump(corruptPage);
                // NOTE(review): in the non-DEBUG branch secondImage also dumps
                // corruptPage rather than the freshly re-read pageData - looks
                // suspicious, confirm against upstream before changing.
                String secondImage =
                    (SanityManager.DEBUG) ?
                        toString() : pagedataToHexDump(corruptPage);

                throw StandardException.newException(
                        SQLState.FILE_IO_GARBLED, se,
                        newIdentity, firstImage, secondImage);
            }
            else
            {
                // some other validation failure - propagate unchanged
                throw se;
            }
        }
    }
885
886     /**
887      * Validate the check sum on the page.
888      * <p>
889      * Compare the check sum stored in the page on disk with the checksum
890      * calculated from the bytes on the page.
891      * <p>
892      *
893      * @param id The key that describes the page.
894      *
895      * @exception StandardException Standard exception policy.
896      **/

    protected void validateChecksum(PageKey id)
        throws StandardException
    {
        long onDiskChecksum;

        try
        {
            // read the checksum stored on the page on disk.  It is stored
            // in the last "CHECKSUM_SIZE" bytes of the page, and is a long.
            rawDataIn.setPosition(getPageSize() - CHECKSUM_SIZE);
            onDiskChecksum = rawDataIn.readLong();
        }
        catch (IOException ioe)
        {
            // i/o methods on the byte array have thrown an IOException
            throw dataFactory.markCorrupt(
                StandardException.newException(
                    SQLState.DATA_CORRUPT_PAGE, ioe, id));
        }

        // Force the checksum to be recalculated based on the current page,
        // excluding the trailing checksum bytes themselves.
        checksum.reset();
        checksum.update(pageData, 0, getPageSize() - CHECKSUM_SIZE);

        // force a bad checksum error (test hook, debug builds only)
        if (SanityManager.DEBUG)
        {
            if (SanityManager.DEBUG_ON("TEST_BAD_CHECKSUM"))
            {
                // set on disk checksum to wrong value
                onDiskChecksum = 123456789;
            }
        }

        if (onDiskChecksum != checksum.getValue())
        {
            // try again using new checksum object to be doubly sure the
            // mismatch is not caused by a broken cached checksum instance
            CRC32 newChecksum = new CRC32();
            newChecksum.reset();
            newChecksum.update(pageData, 0, getPageSize() - CHECKSUM_SIZE);
            if (onDiskChecksum != newChecksum.getValue())
            {
                throw StandardException.newException(
                    SQLState.FILE_BAD_CHECKSUM,
                    id,
                    new Long(checksum.getValue()),
                    new Long(onDiskChecksum),
                    pagedataToHexDump(pageData));
            }
            else
            {
                // old one is bad, get rid of it and keep the fresh instance
                if (SanityManager.DEBUG)
                    SanityManager.THROWASSERT("old checksum gets wrong value");

                checksum = newChecksum;
            }
        }
    }
958
959     /**
960      * Recalculate checksum and write it to the page array.
961      * <p>
962      * Recalculate the checksum of the page, and write the result back into
963      * the last bytes of the page.
964      *
965      * @exception IOException if writing to end of array fails.
966      **/

967     protected void updateChecksum() throws IOException JavaDoc
968     {
969         checksum.reset();
970         checksum.update(pageData, 0, getPageSize() - CHECKSUM_SIZE);
971
972         rawDataOut.setPosition(getPageSize() - CHECKSUM_SIZE);
973         logicalDataOut.writeLong(checksum.getValue());
974     }
975
976     /**
977      * Write information about page from variables into page byte array.
978      * <p>
979      * This routine insures that all information about the page is reflected
980      * in the page byte buffer. This involves moving information from local
981      * variables into encoded version on the page in page header and checksum.
982      * <p>
983      *
984      * @param identity The key of this page.
985      *
986      * @exception StandardException Standard exception policy.
987      **/

    protected void writePage(PageKey identity)
        throws StandardException
    {
        if (SanityManager.DEBUG)
        {
            // some consistency checks on fields of the page, good to check
            // before we write them into the page.

            if ((freeSpace < 0) ||
                (firstFreeByte + freeSpace) != (getSlotOffset(slotsInUse - 1)))
            {
                // make sure free space is not negative and does not overlap
                // used space: the free region must end exactly where the
                // last slot entry begins.
                SanityManager.THROWASSERT("slotsInUse = " + slotsInUse
                    + ", firstFreeByte = " + firstFreeByte
                    + ", freeSpace = " + freeSpace
                    + ", slotOffset = " + (getSlotOffset(slotsInUse - 1))
                    + ", page = " + this);
            }

            if ((slotsInUse == 0) &&
                (firstFreeByte != (getPageSize() - totalSpace - CHECKSUM_SIZE)))
            {
                // an empty page must have all of totalSpace free
                SanityManager.THROWASSERT("slotsInUse = " + slotsInUse
                    + ", firstFreeByte = " + firstFreeByte
                    + ", freeSpace = " + freeSpace
                    + ", slotOffset = " + (getSlotOffset(slotsInUse - 1))
                    + ", page = " + this);
            }
        }

        try
        {
            if (headerOutOfDate)
            {
                // full header rewrite (also covers the page version)
                updatePageHeader();
            }
            else
            {
                // page version always need to be updated if page is dirty,
                // either do it in updatePageHeader or by itself
                updatePageVersion();
            }

            // checksum must be recomputed last, after all header bytes are
            // in their final state
            updateChecksum();
        }
        catch (IOException ioe)
        {
            // i/o methods on the byte array have thrown an IOException
            throw dataFactory.markCorrupt(
                StandardException.newException(
                    SQLState.DATA_CORRUPT_PAGE, ioe, identity));
        }
    }
1046
1047    /**
1048     * Write out the format id of this page
1049     *
1050     * @param identity The key of this page.
1051     *
1052     * @exception StandardException Standard exception policy.
1053     **/

1054    protected void writeFormatId(PageKey identity) throws StandardException
1055    {
1056        try
1057        {
1058            if (rawDataOut == null)
1059                createOutStreams();
1060
1061            rawDataOut.setPosition(0);
1062
1063            FormatIdUtil.writeFormatIdInteger(
1064                logicalDataOut, getTypeFormatId());
1065
1066        }
1067        catch (IOException JavaDoc ioe)
1068        {
1069            // i/o methods on the byte array have thrown an IOException
1070
throw dataFactory.markCorrupt(
1071                StandardException.newException(
1072                    SQLState.DATA_CORRUPT_PAGE, ioe, identity));
1073        }
1074    }
1075
1076
1077    /**************************************************************************
1078     * Protected Methods of Cacheable Interface:
1079     **************************************************************************
1080     */

1081
1082    /**************************************************************************
1083     * Protected OverRidden Methods of BasePage:
1084     **************************************************************************
1085     */

1086
1087    /**
1088     * Ensure that the page is released from the cache when it is unlatched.
1089     *
1090     * @see org.apache.derby.impl.store.raw.data.BasePage#releaseExclusive
1091     *
1092     **/

    protected void releaseExclusive()
    {
        // let BasePage drop the exclusive latch first
        super.releaseExclusive();

        // then release our reference so the cache may evict the page
        pageCache.release(this);
    }
1099
1100
1101    /**
1102     * Return the total number of bytes used, reserved, or wasted by the
1103     * record at this slot.
1104     * <p>
1105     * The amount of space the record on this slot is currently taking on the
1106     * page.
1107     *
1108     * If there is any reserve space or wasted space, count that in also
1109     * Do NOT count the slot entry size
1110     * <p>
1111     *
1112     * @return The number of bytes used by the row at slot "slot".
1113     *
1114     * @param slot look at row at this slot.
1115     *
1116     * @exception StandardException Standard exception policy.
1117     **/

1118    public int getTotalSpace(int slot)
1119        throws StandardException
1120    {
1121        try
1122        {
1123            // A slot entry looks like the following:
1124
// 1st field: offset of the record on the page
1125
// 2nd field: length of the record on the page
1126
// 3rd field: amount of space reserved for the record to grow.
1127

1128            // position the read at the beginning of the 2nd field.
1129
rawDataIn.setPosition(getSlotOffset(slot) + slotFieldSize);
1130
1131            // return the size of the record + size of the reserved space.
1132
// the size of the fields to read is determined by slotFieldSize.
1133

1134            return(
1135                ((slotFieldSize == SMALL_SLOT_SIZE) ?
1136                    (rawDataIn.readUnsignedShort() +
1137                     rawDataIn.readUnsignedShort()) :
1138                    (rawDataIn.readInt() +
1139                     rawDataIn.readInt())));
1140                
1141        }
1142        catch (IOException JavaDoc ioe)
1143        {
1144            throw dataFactory.markCorrupt(
1145                StandardException.newException(
1146                    SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
1147        }
1148    }
1149
1150    /**
1151     * Is there minimal space for insert?
1152     * <p>
1153     * Does quick calculation to see if average size row on this page could
1154     * be inserted on the page. This is done because the actual row size
1155     * being inserted isn't known until we actually copy the columns from
1156     * their object form into their on disk form which is expensive. So
1157     * we use this calculation so that in the normal case we only do one
1158     * copy of the row directly onto the page.
1159     * <p>
1160     *
1161     * @return true if we think the page will allow an insert, false otherwise.
1162     *
1163     * @exception StandardException Standard exception policy.
1164     **/

1165    public boolean spaceForInsert()
1166        throws StandardException
1167    {
1168        // is this an empty page
1169
if (slotsInUse == 0)
1170            return(true);
1171
1172        if (!allowInsert())
1173            return(false);
1174
1175        int usedSpace = totalSpace - freeSpace;
1176        int bytesPerRow = usedSpace / slotsInUse;
1177
1178        return(bytesPerRow <= freeSpace);
1179    }
1180
1181    /**
1182     * Is row guaranteed to be inserted successfully on this page?
1183     * <p>
1184     * Return true if this record is guaranteed to be inserted successfully
1185     * using insert() or insertAtSlot(). This guarantee is only valid while
1186     * the row remains unchanged and the page latch is held.
1187     * <p>
1188     *
     * @return boolean indicating if the row can be inserted on this page.
1190     *
1191     * @param row The row to check for insert.
1192     * @param validColumns bit map to interpret valid columns in row.
1193     * @param overflowThreshold The percentage of the page to use for the
1194     * insert. 100 means use 100% of the page,
1195     * 50 means use 50% of page (ie. make sure
1196     * 2 rows fit per page).
1197     *
1198     * @exception StandardException Standard exception policy.
1199     **/

1200    public boolean spaceForInsert(
1201    Object JavaDoc[] row,
1202    FormatableBitSet validColumns,
1203    int overflowThreshold)
1204        throws StandardException
1205    {
1206
1207        // is this an empty page
1208
if (slotsInUse == 0)
1209            return true;
1210        
1211        // does the estimate think it won't fit, if not return false to avoid
1212
// cost of calling logRow() just to figure out if the row will fit.
1213
if (!allowInsert())
1214            return false;
1215
1216        DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();
1217
1218        try
1219        {
1220            // This is a public call, start column is rawstore only.
1221
// set the starting Column for the row to be 0.
1222
logRow(
1223                0, true, nextId, row, validColumns, out,
1224                0, Page.INSERT_DEFAULT, -1, -1, overflowThreshold);
1225
1226        }
1227        catch (NoSpaceOnPage nsop)
1228        {
1229            return false;
1230        }
1231        catch (IOException JavaDoc ioe)
1232        {
1233            throw StandardException.newException(
1234                SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
1235        }
1236
1237        return true;
1238    }
1239
1240    /**
1241     * Is row guaranteed to be inserted successfully on this page?
1242     * <p>
1243     * Return true if this record is guaranteed to be inserted successfully
1244     * using insert() or insertAtSlot(). This guarantee is only valid while
1245     * the row remains unchanged and the page latch is held.
1246     * <p>
1247     * This is a private call only used when calculating whether an overflow
1248     * page can be used to insert part of an overflow row/column.
1249     *
     * @return boolean indicating if the row can be inserted on this page.
1251     *
1252     * @param row The row to check for insert.
1253     * @param validColumns bit map to interpret valid columns in row.
1254     * @param overflowThreshold The percentage of the page to use for the
1255     * insert. 100 means use 100% of the page,
1256     * 50 means use 50% of page (ie. make sure
1257     * 2 rows fit per page).
1258     *
1259     * @exception StandardException Standard exception policy.
1260     **/

1261    private boolean spaceForInsert(
1262    Object JavaDoc[] row,
1263    FormatableBitSet validColumns,
1264    int spaceNeeded,
1265    int startColumn,
1266    int overflowThreshold)
1267        throws StandardException
1268    {
1269        if (!(spaceForInsert() && (freeSpace >= spaceNeeded)))
1270            return false;
1271
1272        DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();
1273
1274        try
1275        {
1276            logRow(
1277                0, true, nextId, row, validColumns, out, startColumn,
1278                Page.INSERT_DEFAULT, -1, -1, overflowThreshold);
1279
1280        }
1281        catch (NoSpaceOnPage nsop)
1282        {
1283            return false;
1284        }
1285        catch (IOException JavaDoc ioe)
1286        {
1287            throw StandardException.newException(
1288                SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
1289        }
1290
1291        return true;
1292    }
1293
1294    /**
1295     * Is this page unfilled?
1296     * <p>
1297     * Returns true if page is relatively unfilled,
1298     * which means the page is < 1/2 full and has enough space to insert an
1299     * "average" sized row onto the page.
1300     * <p>
1301     *
1302     * @return true if page is relatively unfilled.
1303     **/

1304    public boolean unfilled()
1305    {
1306        return (allowInsert() && (freeSpace > (getPageSize() / 2)));
1307    }
1308
1309    /**
1310     * Is there enough space on the page to insert a minimum size row?
1311     * <p>
1312     * Calculate whether there is enough space on the page to insert a
1313     * minimum size row. The calculation includes maintaining the required
1314     * reserved space on the page for existing rows to grow on the page.
1315     * <p>
1316     *
1317     * @return boolean indicating if a minimum sized row can be inserted.
1318     **/

1319    public boolean allowInsert()
1320    {
1321        // is this an empty page
1322
if (slotsInUse == 0)
1323            return true;
1324
1325        int spaceAvailable = freeSpace;
1326
1327        spaceAvailable -= slotEntrySize; // need to account new slot entry
1328

1329        if (spaceAvailable < minimumRecordSize)
1330            return false;
1331
1332        // see that we reserve enough space for existing rows to grow on page
1333
if (((spaceAvailable * 100) / totalSpace) < spareSpace)
1334            return false;
1335
1336        return true;
1337    }
1338
1339    /**
1340     * Does this page have enough space to insert the input rows?
1341     * <p>
1342     * Can the rows with lengths spaceNeeded[0..num_rows-1] be copied onto
1343     * this page?
1344     * <p>
1345     *
1346     * @return true if the sum of the lengths will fit on the page.
1347     *
1348     * @param num_rows number of rows to check for.
1349     * @param spaceNeeded array of lengths of the rows to insert.
1350     **/

1351    public boolean spaceForCopy(int num_rows, int[] spaceNeeded)
1352    {
1353        // determine how many more bytes are needed for the slot entries
1354
int bytesNeeded = slotEntrySize * num_rows;
1355
1356        for (int i = 0; i < num_rows; i++)
1357        {
1358            if (spaceNeeded[i] > 0)
1359            {
1360                // add up the space needed by the rows, add in minimumRecordSize
1361
// if length of actual row is less than minimumRecordSize.
1362

1363                bytesNeeded +=
1364                    (spaceNeeded[i] >= minimumRecordSize ?
1365                         spaceNeeded[i] : minimumRecordSize);
1366            }
1367        }
1368
1369        return((freeSpace - bytesNeeded) >= 0);
1370    }
1371
1372    protected boolean spaceForCopy(int spaceNeeded)
1373    {
1374        // add up the space needed by the rows, add in minimumRecordSize
1375
// if length of actual row is less than minimumRecordSize.
1376
int bytesNeeded = slotEntrySize +
1377            (spaceNeeded >= minimumRecordSize ?
1378                 spaceNeeded : minimumRecordSize);
1379
1380        return((freeSpace - bytesNeeded) >= 0);
1381    }
1382
1383    /**
1384     * Read the record at the given slot into the given row.
1385     * <P>
1386     * This reads and initializes the columns in the row array from the raw
1387     * bytes stored in the page associated with the given slot. If validColumns
1388     * is non-null then it will only read those columns indicated by the bit
1389     * set, otherwise it will try to read into every column in row[].
1390     * <P>
1391     * If there are more columns than entries in row[] then it just stops after
1392     * every entry in row[] is full.
1393     * <P>
1394     * If there are more entries in row[] than exist on disk, the requested
1395     * excess columns will be set to null by calling the column's object's
1396     * restoreToNull() routine (ie. ((Object) column).restoreToNull() ).
1397     * <P>
1398     * If a qualifier list is provided then the row will only be read from
1399     * disk if all of the qualifiers evaluate true. Some of the columns may
1400     * have been read into row[] in the process of evaluating the qualifier.
1401     * <p>
1402     * This routine should only be called on the head portion of a row, it
1403     * will call a utility routine to read the rest of the row if it is a
1404     * long row.
1405     *
1406     *
1407     * @param slot the slot number
1408     * @param row (out) filled in sparse row
1409     * @param fetchDesc Information describing fetch, including what
1410     * columns to fetch and qualifiers.
1411     * @param recordToLock the record handle for the row at top level,
1412     * and is used in OverflowInputStream to lock the
1413     * row for Blobs/Clobs.
1414     * @param isHeadRow The row on this page includes the head record
1415     * handle. Will be false for the overflow portions
1416     * of a "long" row, where columns of a row span
1417     * multiple pages.
1418     *
1419     * @return false if a qualifier_list is provided and the row does not
1420     * qualifier (no row read in that case), else true.
1421     *
1422     * @exception StandardException Standard Cloudscape error policy
1423     **/

    protected boolean restoreRecordFromSlot(
    int                 slot,
    Object[]            row,
    FetchDescriptor     fetchDesc,
    RecordHandle        recordToLock,
    StoredRecordHeader  recordHeader,
    boolean             isHeadRow)
        throws StandardException
    {
        try
        {
            // offset of the first byte of field data for this record, i.e.
            // just past the record header at the start of the record
            int offset_to_row_data =
                getRecordOffset(slot) + recordHeader.size();

            if (SanityManager.DEBUG)
            {
                // record data can never live inside the page header region
                if (getRecordOffset(slot) <
                        (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE))
                {
                    SanityManager.THROWASSERT(
                        "Incorrect offset. offset = " +
                            getRecordOffset(slot) +
                        ", offset should be < " +
                        "(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " +
                            (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) +
                        ", current slot = " + slot +
                        ", total slotsInUse = " + slotsInUse);
                }

                SanityManager.ASSERT(
                    isHeadRow, "restoreRecordFromSlot called on a non-headrow");
                SanityManager.ASSERT(
                    !isOverflowPage(),
                    "restoreRecordFromSlot called on an overflow page.");
            }

            // position the array reading stream at beginning of row data just
            // past the record header.
            ArrayInputStream lrdi = rawDataIn;
            lrdi.setPosition(offset_to_row_data);

            if (!recordHeader.hasOverflow())
            {
                // ---- short row: entire record lives on this page ----

                if (isHeadRow)
                {
                    // qualifiers can be applied before materializing the row
                    if (fetchDesc != null &&
                        fetchDesc.getQualifierList() != null)
                    {
                        fetchDesc.reset();

                        if (!qualifyRecordFromSlot(
                                row,
                                offset_to_row_data,
                                fetchDesc,
                                recordHeader,
                                recordToLock))
                        {
                            // row disqualified; nothing read into "row"
                            return(false);
                        }
                        else
                        {
                            // reset position back for subsequent record read.
                            lrdi.setPosition(offset_to_row_data);
                        }
                    }
                }

                // call routine to do the real work.  Note that
                // readRecordFromStream() may return false for non-overflow
                // record, this is in the case where caller requests more
                // columns than exist on disk.  In that case we still return
                // true at this point as there are no more columns that we
                // can return.
                if (fetchDesc != null)
                {
                    readRecordFromArray(
                        row,
                        (fetchDesc.getValidColumns() == null) ?
                            row.length - 1 : fetchDesc.getMaxFetchColumnId(),
                        fetchDesc.getValidColumnsArray(),
                        fetchDesc.getMaterializedColumns(),
                        lrdi,
                        recordHeader,
                        (ErrorObjectInput) null /* always null */,
                        recordToLock);
                }
                else
                {
                    // no fetch descriptor: read every column into row[]
                    readRecordFromArray(
                        row,
                        row.length - 1,
                        (int[]) null,
                        (int[]) null,
                        lrdi,
                        recordHeader,
                        (ErrorObjectInput) null /* always null */,
                        recordToLock);
                }

                return(true);
            }
            else
            {
                // ---- long row: record continues on overflow pages ----

                if (fetchDesc != null)
                {
                    if (fetchDesc.getQualifierList() != null)
                    {
                        fetchDesc.reset();
                    }

                    // read the columns of the head portion on this page
                    readRecordFromArray(
                        row,
                        (fetchDesc.getValidColumns() == null) ?
                            row.length - 1 : fetchDesc.getMaxFetchColumnId(),
                        fetchDesc.getValidColumnsArray(),
                        fetchDesc.getMaterializedColumns(),
                        lrdi,
                        recordHeader,
                        (ErrorObjectInput) null /* always null */,
                        recordToLock);
                }
                else
                {
                    readRecordFromArray(
                        row,
                        row.length - 1,
                        (int[]) null,
                        (int[]) null,
                        lrdi,
                        recordHeader,
                        (ErrorObjectInput) null /* always null */,
                        recordToLock);
                }

                // call routine to loop through all the overflow portions of
                // the row, reading it into "row".
                while (recordHeader != null)
                {
                    // The record is a long row, loop calling code to read the
                    // pieces of the row located in a linked list of rows on
                    // overflow pages.
                    StoredPage overflowPage =
                        getOverflowPage(recordHeader.getOverflowPage());

                    if (SanityManager.DEBUG)
                    {
                        if (overflowPage == null)
                            SanityManager.THROWASSERT(
                                "cannot get overflow page");
                    }

                    // This call reads in the columns of the row that reside
                    // on "overflowPage", and if there is another piece it
                    // returns the recordHeader of the row on overFlowPage,
                    // from which we can find the next piece of the row.  A
                    // null return means that we have read in the entire row,
                    // and are done.
                    recordHeader =
                        overflowPage.restoreLongRecordFromSlot(
                            row,
                            fetchDesc,
                            recordToLock,
                            recordHeader);

                    overflowPage.unlatch();
                    overflowPage = null;
                }

                // for overflow rows just apply qualifiers at end for now.
                if ((fetchDesc != null) &&
                    (fetchDesc.getQualifierList() != null))
                {
                    if (!qualifyRecordFromRow(
                            row, fetchDesc.getQualifierList()))
                    {
                        return(false);
                    }
                }

                return(true);
            }
        }
        catch (IOException ioe)
        {
            if (SanityManager.DEBUG)
            {
                // dump diagnostics before declaring the page corrupt
                if (pageData == null)
                {
                    SanityManager.DEBUG_PRINT("DEBUG_TRACE",
                        "caught an IOException in restoreRecordFromSlot " +
                        (PageKey)getIdentity() + " slot " + slot +
                        ", pageData is null");
                }
                else
                {
                    SanityManager.DEBUG_PRINT("DEBUG_TRACE",
                        "caught an IOException in reestoreRecordFromSlot, " +
                        (PageKey)getIdentity() + " slot " + slot +
                        ", pageData.length = " +
                        pageData.length + " pageSize = " + getPageSize());
                    SanityManager.DEBUG_PRINT("DEBUG_TRACE",
                        "Hex dump of pageData \n " +
                        "--------------------------------------------------\n" +
                        pagedataToHexDump(pageData) +
                        "--------------------------------------------------\n");
                    SanityManager.DEBUG_PRINT("DEBUG_TRACE",
                        "Attempt to dump page " + this.toString());
                }
            }

            // i/o methods on the byte array have thrown an IOException
            throw dataFactory.markCorrupt(
                StandardException.newException(
                    SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
        }
    }
1642
1643    private StoredRecordHeader restoreLongRecordFromSlot(
1644    Object JavaDoc[] row,
1645    FetchDescriptor fetchDesc,
1646    RecordHandle recordToLock,
1647    StoredRecordHeader parent_recordHeader)
1648        throws StandardException
1649    {
1650
1651        int slot =
1652            findRecordById(
1653                parent_recordHeader.getOverflowId(), Page.FIRST_SLOT_NUMBER);
1654
1655        StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
1656
1657        try
1658        {
1659            int offset_to_row_data =
1660                getRecordOffset(slot) + recordHeader.size();
1661
1662            if (SanityManager.DEBUG)
1663            {
1664                if (getRecordOffset(slot) <
1665                        (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE))
1666                {
1667                    SanityManager.THROWASSERT(
1668                        "Incorrect offset. offset = " +
1669                            getRecordOffset(slot) +
1670                        ", offset should be < " +
1671                        "(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " +
1672                            (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) +
1673                        ", current slot = " + slot +
1674                        ", total slotsInUse = " + slotsInUse);
1675                }
1676            }
1677
1678            // position the array reading stream at beginning of row data
1679
// just past the record header.
1680
ArrayInputStream lrdi = rawDataIn;
1681            lrdi.setPosition(offset_to_row_data);
1682
1683            if (fetchDesc != null)
1684            {
1685                if (fetchDesc.getQualifierList() != null)
1686                {
1687                    fetchDesc.reset();
1688                }
1689
1690                readRecordFromArray(
1691                    row,
1692                    (fetchDesc.getValidColumns() == null) ?
1693                        row.length - 1 : fetchDesc.getMaxFetchColumnId(),
1694                    fetchDesc.getValidColumnsArray(),
1695                    fetchDesc.getMaterializedColumns(),
1696                    lrdi,
1697                    recordHeader,
1698                    (ErrorObjectInput) null /* always null */,
1699                    recordToLock);
1700            }
1701            else
1702            {
1703                readRecordFromArray(
1704                    row,
1705                    row.length - 1,
1706                    (int[]) null,
1707                    (int[]) null,
1708                    lrdi,
1709                    recordHeader,
1710                    (ErrorObjectInput) null /* always null */,
1711                    recordToLock);
1712            }
1713
1714            return(recordHeader.hasOverflow() ? recordHeader : null);
1715        }
1716        catch (IOException JavaDoc ioe)
1717        {
1718            if (SanityManager.DEBUG)
1719            {
1720                if (pageData == null)
1721                {
1722                    SanityManager.DEBUG_PRINT("DEBUG_TRACE",
1723                        "caught an IOException in restoreRecordFromSlot " +
1724                        (PageKey)getIdentity() + " slot " + slot +
1725                        ", pageData is null");
1726                }
1727                else
1728                {
1729                    SanityManager.DEBUG_PRINT("DEBUG_TRACE",
1730                        "caught an IOException in reestoreRecordFromSlot, " +
1731                        (PageKey)getIdentity() + " slot " + slot +
1732                        ", pageData.length = " +
1733                        pageData.length + " pageSize = " + getPageSize());
1734                    SanityManager.DEBUG_PRINT("DEBUG_TRACE",
1735                        "Hex dump of pageData \n " +
1736                        "--------------------------------------------------\n" +
1737                        pagedataToHexDump(pageData) +
1738                        "--------------------------------------------------\n");
1739                    SanityManager.DEBUG_PRINT("DEBUG_TRACE",
1740                        "Attempt to dump page " + this.toString());
1741                }
1742            }
1743
1744            // i/o methods on the byte array have thrown an IOException
1745
throw dataFactory.markCorrupt(
1746                StandardException.newException(
1747                    SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
1748        }
1749    }
1750
    /**
     * Create a new record handle.
     * <p>
     * Return the next record id for allocation.  Callers of this interface
     * expect the next id to get bumped some where else - probably by
     * storeRecordForInsert().
     * <p>
     *
     * @return The next id to assign to a row.
     **/
    public int newRecordId()
    {
        // note: nextId is NOT incremented here, see newRecordIdAndBump().
        return nextId;
    }
1765
    /**
     * Create a new record handle, and bump the id.
     * <p>
     * Create a new record handle, and bump the id while holding the latch
     * so that no other user can ever see this record id.  This will lead
     * to unused record id's in the case where an insert fails because there
     * is not enough space on the page.
     * <p>
     *
     * @return The next id to assign to a row.
     **/
    public int newRecordIdAndBump()
    {
        // headerOutOfDate must be bumped as nextId is changing, and must
        // eventually be updated in the page array.
        headerOutOfDate = true;

        return nextId++;
    }
1785
1786
1787    /**
1788     * Create a new record id based on current one passed in.
1789     * <p>
1790     * This interface is used for the "copy" insert interface of raw store
1791     * where multiple rows are inserted into a page in a single logged
1792     * operation. We don't want to bump the id until the operation is logged
1793     * so we just allocated each id in order and then bump the next id at
1794     * the end of the operation.
1795     * <p>
1796     *
1797     * @return the next id based on the input id.
1798     *
1799     * @param recordId The id caller just used, return the next one.
1800     *
1801     **/

1802    protected int newRecordId(int recordId)
1803    {
1804        if (SanityManager.DEBUG)
1805        {
1806            SanityManager.ASSERT(
1807                recordId >= nextId,
1808                "should not create a record Id that is already given out");
1809        }
1810
1811        return recordId + 1;
1812    }
1813
    /**
     * Return whether this page is an overflow page.
     *
     * @return true if this page is an overflow page, as recorded in the
     *         page header (see readPageHeader()).
     **/
    public boolean isOverflowPage()
    {
        return isOverflowPage;
    }
1818
1819
1820
1821    /**************************************************************************
1822     * Public Methods specific to StoredPage:
1823     **************************************************************************
1824     */

1825
    /**
     * Get the full size of the page.
     *
     * @return the length in bytes of the backing page byte array.
     **/
    public final int getPageSize()
    {
        return pageData.length;
    }
1833
1834
1835    /**
1836     * Zero out a portion of the page.
1837     * <p>
1838     **/

1839    protected final void clearSection(int offset, int length)
1840    {
1841        int endOffset = offset + length;
1842
1843        while (offset < endOffset)
1844            pageData[offset++] = 0;
1845    }
1846
    /**
     * The maximum free space on this page possible.
     * <p>
     * The maximum amount of space that can be used on the page
     * for the records and the slot offset table.
     * NOTE: a subclass may have overridden it to report less freeSpace.
     *
     * @return the maximum free space on this page possible.
     **/
    protected int getMaxFreeSpace()
    {
        // everything except the space before RECORD_SPACE_OFFSET (the page
        // header) and the checksum stored at the end of the page.
        return getPageSize() - RECORD_SPACE_OFFSET - CHECKSUM_SIZE;
    }
1861
    /**
     * The current free space on the page.
     *
     * @return the number of bytes currently free on this page (maintained
     *         incrementally by the slot/record manipulation routines).
     **/
    protected int getCurrentFreeSpace()
    {
        return freeSpace;
    }
1869
1870    /**************************************************************************
1871     * Page header routines
1872     **************************************************************************
1873     */

1874
    /**
     * Read the page header from the page array.
     * <p>
     * Read the page header from byte form in the page array into in memory
     * variables.  The field order and widths here must match exactly what
     * updatePageHeader() writes.
     **/
    private void readPageHeader()
        throws IOException
    {
        // these reads are always against the page array
        ArrayInputStream lrdi = rawDataIn;

        lrdi.setPosition(PAGE_HEADER_OFFSET);
        long spare;

        isOverflowPage = lrdi.readBoolean();
        setPageStatus (lrdi.readByte());
        setPageVersion (lrdi.readLong());
        slotsInUse = lrdi.readUnsignedShort();
        nextId = lrdi.readInt();
        generation = lrdi.readInt(); // page generation (Future Use)
        prevGeneration = lrdi.readInt(); // previous generation (Future Use)
        bipLocation = lrdi.readLong(); // BIPage location (Future Use)

        // number of deleted rows on page, we start to store this release 2.0.
        // for upgrade reasons, a 0 on disk means -1, so, we subtract one here.
        deletedRowCount = lrdi.readUnsignedShort() - 1;

        // the next 4 fields (total 22 bytes) are reserved for future use;
        // they must still be read so the stream advances past them.
        spare = lrdi.readUnsignedShort();
        spare = lrdi.readInt(); // used by encryption
        spare = lrdi.readLong();
        spare = lrdi.readLong();
    }
1909
1910
    /**
     * Update the page header in the page array.
     * <p>
     * Write the bytes of the page header, taking the values from those
     * in the in memory variables.  The field order and widths here must
     * match exactly what readPageHeader() expects.
     **/
    private void updatePageHeader()
        throws IOException
    {
        rawDataOut.setPosition(PAGE_HEADER_OFFSET);

        logicalDataOut.writeBoolean(isOverflowPage);
        logicalDataOut.writeByte(getPageStatus());
        logicalDataOut.writeLong(getPageVersion());
        logicalDataOut.writeShort(slotsInUse);
        logicalDataOut.writeInt(nextId);
        logicalDataOut.writeInt(generation); // page generation (Future Use)
        logicalDataOut.writeInt(prevGeneration); // previous generation (Future Use)
        logicalDataOut.writeLong(bipLocation); // BIPage location (Future Use)

        // number of deleted rows on page, we start to store this release 2.0.
        // for upgrade reasons, a 0 on disk means -1, so, we add one when we
        // write it to disk.
        logicalDataOut.writeShort(deletedRowCount + 1);

        logicalDataOut.writeShort(0); // reserved for future
        logicalDataOut.writeInt(
            dataFactory.random()); // random bytes for encryption
        logicalDataOut.writeLong(0); // reserved for future
        logicalDataOut.writeLong(0); // reserved for future

        // we put a random value int into the page if the database is encrypted
        // so that the checksum will be very different even with the same
        // page image, when we encrypt or decrypt the page, we move the
        // checksum to the front so that the encrypted page will look very
        // different even with just the one int difference.  We never look at
        // the value of the random number and we could have put it anywhere in
        // the page as long as it doesn't obscure real data.

        // in-memory header now matches the byte form again.
        headerOutOfDate = false;
    }
1952
    /**
     * Update the page version number in the byte array.
     * <p>
     * Writes only the version field of the page header, at its fixed
     * offset, leaving all other header fields untouched.
     **/
    private void updatePageVersion()
        throws IOException
    {
        rawDataOut.setPosition(PAGE_VERSION_OFFSET);
        logicalDataOut.writeLong(getPageVersion());
    }
1962
1963    /**************************************************************************
1964     * Slot Offset & Length table manipulation
1965     **************************************************************************
1966     */

1967
1968    /**
1969     * Get the page offset of a given slot entry.
1970     * <p>
1971     * Get the page offset of a slot entry, this is not the offset of
1972     * the record stored in the slot, but the offset of the actual slot.
1973     *
1974     * @return The page offset of a given slot entry.
1975     *
1976     * @param slot The array entry of the slot to find.
1977     **/

1978    private int getSlotOffset(int slot)
1979    {
1980        // slot table grows backward from the spot at the end of the page just
1981
// before the checksum which is located in the last 8 bytes of the page.
1982

1983        return(slotTableOffsetToFirstEntry - (slot * slotEntrySize));
1984    }
1985
    /**
     * Get the page offset of the record associated with the input slot.
     * <p>
     * This is the actual offset on the page of the beginning of the record.
     *
     * @param slot The array entry of the slot to find.
     *
     * @return The page offset of the record associated with the input slot.
     **/
    private int getRecordOffset(int slot)
    {
        byte[] data = pageData;
        int offset = slotTableOffsetToFirstEntry - (slot * slotEntrySize);

        // offset on the page of the record is stored in the first 2 or 4 bytes
        // of the slot table entry.  Code has been inlined for performance
        // critical low level routine (big-endian unsigned read, equivalent
        // to the stream calls below):
        //
        // return(
        //     (slotFieldSize == SMALL_SLOT_SIZE) ?
        //         readUnsignedShort() : readInt());

        return(
            (slotFieldSize == SMALL_SLOT_SIZE) ?

             ((data[offset++] & 0xff) << 8) |
              (data[offset] & 0xff) :

             (((data[offset++] & 0xff) << 24) |
              ((data[offset++] & 0xff) << 16) |
              ((data[offset++] & 0xff) << 8) |
              ((data[offset] & 0xff) )));
    }
2019
2020    /**
2021     * Set the page offset of the record associated with the input slot.
2022     * <p>
2023     * This is the actual offset on the page of the beginning of the record.
2024     *
2025     * @param slot The array entry of the slot to set.
2026     * @param recordOffset the new offset to set.
2027     **/

2028    private void setRecordOffset(int slot, int recordOffset)
2029        throws IOException JavaDoc
2030    {
2031        rawDataOut.setPosition(getSlotOffset(slot));
2032
2033        if (slotFieldSize == SMALL_SLOT_SIZE)
2034            logicalDataOut.writeShort(recordOffset);
2035        else
2036            logicalDataOut.writeInt(recordOffset);
2037    }
2038
2039    /**
2040     * Return length of row on this page.
2041     * <p>
2042     * Return the total length of data and header stored on this page for
2043     * this record. This length is stored as the second "field" of the
2044     * slot table entry.
2045     *
2046     * @return The length of the row on this page.
2047     *
2048     * @param slot the slot of the row to look up the length of.
2049     *
2050     **/

2051    protected int getRecordPortionLength(int slot)
2052        throws IOException JavaDoc
2053    {
2054        if (SanityManager.DEBUG)
2055        {
2056            SanityManager.ASSERT(getRecordOffset(slot) != 0);
2057        }
2058
2059        // these reads are always against the page array
2060
ArrayInputStream lrdi = rawDataIn;
2061
2062        lrdi.setPosition(
2063            slotTableOffsetToFirstRecordLengthField - (slot * slotEntrySize));
2064
2065        return(
2066            (slotFieldSize == SMALL_SLOT_SIZE) ?
2067                lrdi.readUnsignedShort() : lrdi.readInt());
2068    }
2069
2070    /**
2071     * Return reserved length of row on this page.
2072     * <p>
2073     * Return the reserved length of this record.
2074     * This length is stored as the third "field" of the slot table entry.
2075     *
2076     * @return The reserved length of the row on this page.
2077     *
2078     * @param slot the slot of the row to look up the length of.
2079     *
2080     **/

2081    public int getReservedCount(int slot) throws IOException JavaDoc
2082    {
2083        if (SanityManager.DEBUG)
2084        {
2085            SanityManager.ASSERT(getRecordOffset(slot) != 0);
2086        }
2087
2088        // these reads are always against the page array
2089
ArrayInputStream lrdi = rawDataIn;
2090
2091        lrdi.setPosition(
2092            slotTableOffsetToFirstReservedSpaceField - (slot * slotEntrySize));
2093
2094        return(
2095            (slotFieldSize == SMALL_SLOT_SIZE) ?
2096                lrdi.readUnsignedShort() : lrdi.readInt());
2097    }
2098
2099
    /**
     * Update the length of data stored on this page for this record
     * <p>
     * Update both the record length "field" and the reserved space "field"
     * of the slot table entry associated with "slot".  This length is stored
     * as the second "field" of the slot table entry.  The changes to these
     * 2 fields are represented as the delta to apply to each field as input
     * in "delta" and "reservedDelta."
     * <p>
     *
     * @param slot          the slot of the record to set.
     * @param delta         The amount the record length changed.
     * @param reservedDelta The amount the reserved length changed.
     *
     * @exception IOException if positioning or writing against the page
     *            array fails.
     **/
    private void updateRecordPortionLength(
    int slot,
    int delta,
    int reservedDelta)
        throws IOException
    {
        if (SanityManager.DEBUG)
        {
            SanityManager.ASSERT(getRecordOffset(slot) != 0);

            if ((delta + reservedDelta) < 0)
                SanityManager.THROWASSERT(
                    "total space of record is not allowed to shrink, delta == "
                    + delta + " reservedDelta = " + reservedDelta);

            if ((getRecordPortionLength(slot) + delta) < 0)
                SanityManager.THROWASSERT(
                    "record portion length cannot be < 0.recordPortionLength = "
                    + getRecordPortionLength(slot) + " delta = " + delta);

            if ((getReservedCount(slot) + reservedDelta) < 0)
                SanityManager.THROWASSERT(
                    "reserved space for record cannot be < 0. reservedCount = "
                    + getReservedCount(slot) + " reservedDelta = "
                    + reservedDelta);
        }

        // position the stream to beginning of 2nd field of slot entry.
        rawDataOut.setPosition(
            slotTableOffsetToFirstRecordLengthField - (slot * slotEntrySize));

        // write the new record length to 2nd field
        if (slotFieldSize == SMALL_SLOT_SIZE)
            logicalDataOut.writeShort(getRecordPortionLength(slot) + delta);
        else
            logicalDataOut.writeInt(getRecordPortionLength(slot) + delta);

        // if necessary, write the 3rd field - above write has positioned the
        // stream to the 3rd field, so no repositioning is needed.
        if (reservedDelta != 0)
        {
            if (slotFieldSize == SMALL_SLOT_SIZE)
            {
                logicalDataOut.writeShort(
                    getReservedCount(slot) + reservedDelta);
            }
            else
            {
                logicalDataOut.writeInt(
                    getReservedCount(slot) + reservedDelta);
            }
        }
    }
2172
    /**
     * Initialize the in-memory slot table.
     * <p>
     * Initialize the in-memory slot table, ie. that of our super-class
     * BasePage.  Go through all the records on the page and set the
     * freeSpace and firstFreeByte on page.
     * <p>
     *
     * @exception StandardException Standard exception policy; the page is
     *            marked corrupt if a record offset is out of range or an
     *            IOException occurs while reading the slot table.
     **/
    private void initSlotTable()
        throws StandardException
    {
        int localSlotsInUse = slotsInUse;

        // must initialize the header now
        initializeHeaders(localSlotsInUse);

        // mark all the space on the page as free
        clearAllSpace();

        // first count the space occupied by the slot table
        freeSpace -= localSlotsInUse * slotEntrySize;

        // track the record that lives highest on the page; everything past
        // its end is free space.
        int lastSlotOnPage = -1;
        int lastRecordOffset = -1;

        try
        {
            for (int slot = 0; slot < localSlotsInUse; slot++)
            {
                if (SanityManager.DEBUG)
                {
                    if (!isOverflowPage() &&
                        minimumRecordSize > getTotalSpace(slot))
                    {
                        SanityManager.THROWASSERT(
                            " slot " + slot +
                            " minimumRecordSize = " + minimumRecordSize +
                            " totalSpace = " + getTotalSpace(slot) +
                            "recordPortionLength = " +
                                getRecordPortionLength(slot)
                            + " reservedCount = " + getReservedCount(slot));
                    }
                }

                int recordOffset = getRecordOffset(slot);

                // check that offset points into the record space area.
                if ((recordOffset < RECORD_SPACE_OFFSET) ||
                    (recordOffset >= (getPageSize() - CHECKSUM_SIZE)))
                {
                    throw dataFactory.markCorrupt(
                        StandardException.newException(
                            SQLState.DATA_CORRUPT_PAGE, getPageId()));
                }

                if (recordOffset > lastRecordOffset)
                {
                    lastRecordOffset = recordOffset;
                    lastSlotOnPage = slot;
                }
            }

            bumpRecordCount(localSlotsInUse);

            if (lastSlotOnPage != -1)
            {
                // Calculate the firstFreeByte for the page,
                // and the freeSpace on Page
                firstFreeByte =
                    lastRecordOffset + getTotalSpace(lastSlotOnPage);
                freeSpace -= firstFreeByte - RECORD_SPACE_OFFSET;
            }

            if (SanityManager.DEBUG)
            {
                // free space must exactly fill the gap between the end of
                // the records and the start of the slot table.
                if ((freeSpace < 0) ||
                    ((firstFreeByte + freeSpace) !=
                         (getSlotOffset(slotsInUse - 1))))
                {
                    SanityManager.THROWASSERT(
                        "firstFreeByte = " + firstFreeByte
                        + ", freeSpace = " + freeSpace
                        + ", slotOffset = " + (getSlotOffset(slotsInUse - 1))
                        + ", slotsInUse = " + localSlotsInUse);
                }

                if (localSlotsInUse == 0)
                {
                    SanityManager.ASSERT(
                        firstFreeByte ==
                            (getPageSize() - totalSpace - CHECKSUM_SIZE));
                }
            }

            // upgrade issue. Pre 1.5 release, we do not store deletedRowCount
            // therefore, if we are accessing an older database,
            // we need to calculate the deletedRowCount here
            // (readPageHeader() maps an on-disk 0 to -1).
            if (deletedRowCount == -1)
            {
                int count = 0;
                int maxSlot = slotsInUse;
                for (int slot = FIRST_SLOT_NUMBER ; slot < maxSlot; slot++)
                {
                    if (isDeletedOnPage(slot))
                        count++;
                }
                deletedRowCount = count;
            }

        }
        catch (IOException ioe)
        {
            // i/o methods on the byte array have thrown an IOException
            throw dataFactory.markCorrupt(
                StandardException.newException(
                    SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
        }
    }
2294
2295    
2296    /**
2297     * Set up a new slot entry.
2298     * <p>
2299     *
2300     * @param slot the slot to initialize.
2301     * @param recordOffset the offset on the page to find the record.
2302     * @param recordPortionLength the actual length of record+hdr on page.
2303     * @param reservedSpace the reserved length associated with record.
2304     *
2305     * @exception StandardException Standard exception policy.
2306     **/

2307    private void setSlotEntry(
2308    int slot,
2309    int recordOffset,
2310    int recordPortionLength,
2311    int reservedSpace)
2312        throws IOException JavaDoc
2313    {
2314        rawDataOut.setPosition(getSlotOffset(slot));
2315
2316        if (SanityManager.DEBUG)
2317        {
2318            if ((recordPortionLength < 0) ||
2319                (reservedSpace < 0) ||
2320                (recordPortionLength >= getPageSize()) ||
2321                (reservedSpace >= getPageSize()))
2322            {
2323                SanityManager.THROWASSERT(
2324                    "recordPortionLength and reservedSpace must " +
2325                    "be > 0, and < page size."
2326                    + " slot = " + slot
2327                    + ", in use = " + slotsInUse
2328                    + ", recordOffset = " + recordOffset
2329                    + ", recordPortionLength = " + recordPortionLength
2330                    + ", reservedSpace = " + reservedSpace);
2331            }
2332
2333            if (recordOffset < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE))
2334            {
2335                SanityManager.THROWASSERT(
2336                    "Record offset must be after the page header."
2337                    + " slot = " + slot
2338                    + ", in use = " + slotsInUse
2339                    + ", recordOffset = " + recordOffset
2340                    + ", recordPortionLength = " + recordPortionLength
2341                    + ", reservedSpace = " + reservedSpace);
2342            }
2343        }
2344
2345        if (slotFieldSize == SMALL_SLOT_SIZE)
2346        {
2347            logicalDataOut.writeShort(recordOffset);
2348            logicalDataOut.writeShort(recordPortionLength);
2349            logicalDataOut.writeShort(reservedSpace);
2350        }
2351        else
2352        {
2353            logicalDataOut.writeInt(recordOffset);
2354            logicalDataOut.writeInt(recordPortionLength);
2355            logicalDataOut.writeInt(reservedSpace);
2356        }
2357    }
2358
2359    /**
2360     * Insert a new slot entry into the current slot array.
2361     * <p>
2362     * Shift the existing slots from slot to (slotsInUse - 1) up by one.
2363     * Up here means from low slot to high slot (e.g from slot 2 to slot 3).
2364     * Our slot table grows backward so we have to be careful here.
2365     *
2366     * @param slot Position the new slot will take
2367     * @param recordOffset Offset of the record for the new slot
2368     * @param recordPortionLength Length of the record stored in the new slot
2369     * @param reservedSpace Length of reserved space of record in slot
2370     *
2371     **/

2372    private void addSlotEntry(
2373    int slot,
2374    int recordOffset,
2375    int recordPortionLength,
2376    int reservedSpace)
2377        throws IOException JavaDoc
2378    {
2379        if (SanityManager.DEBUG)
2380        {
2381            if ((slot < 0) || (slot > slotsInUse))
2382                SanityManager.THROWASSERT("invalid slot " + slot);
2383            
2384            if ((recordPortionLength < 0) || (reservedSpace < 0))
2385                SanityManager.THROWASSERT(
2386                    "recordPortionLength and reservedSpace must be > 0." +
2387                    "recordPortionLength = " + recordPortionLength +
2388                    " reservedSpace = " + reservedSpace);
2389
2390            if (recordOffset < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE))
2391            {
2392                SanityManager.THROWASSERT(
2393                    "Record offset must be after the page header."
2394                        + " slot = " + slot
2395                        + ", in use = " + slotsInUse
2396                        + ", recordOffset = " + recordOffset
2397                        + ", recordPortionLength = " + recordPortionLength
2398                        + ", reservedSpace = " + reservedSpace);
2399            }
2400        }
2401
2402        int newSlotOffset;
2403
2404        // TODO - (mikem) - I think the math below could be slightly optimized.
2405

2406        if (slot < slotsInUse)
2407        {
2408            // inserting a slot into the middle of array so shift all the
2409
// slots from "slot" logically up by one
2410

2411            int startOffset =
2412                getSlotOffset(slotsInUse - 1);
2413
2414            int length =
2415                (getSlotOffset(slot) + slotEntrySize) - startOffset;
2416
2417            newSlotOffset = getSlotOffset(slotsInUse);
2418
2419            System.arraycopy(
2420                pageData, startOffset, pageData, newSlotOffset, length);
2421        }
2422        else
2423        {
2424            // We are adding at end of slot table, so no moving necessary.
2425
newSlotOffset = getSlotOffset(slot);
2426        }
2427
2428        freeSpace -= slotEntrySize;
2429
2430        slotsInUse++;
2431        headerOutOfDate = true; // headerOutOfDate must be set after setDirty
2432
// because isDirty may be called unlatched
2433

2434        setSlotEntry(slot, recordOffset, recordPortionLength, reservedSpace);
2435    }
2436
    /**
     * Remove slot entry from slot array.
     * <p>
     * Remove a storage slot at slot.  Shift the existing slots from
     * slot+1 to (slotsInUse - 1) down by one.
     * Down here means from high slot to low slot (e.g from slot 3 to slot 2).
     *
     * @param slot The slot to delete.
     **/
    private void removeSlotEntry(int slot)
        throws IOException
    {
        if (SanityManager.DEBUG)
        {
            if ((slot < 0) || (slot >= slotsInUse))
                SanityManager.THROWASSERT("invalid slot " + slot);
        }

        // the slot table grows backward, so the last entries live at the
        // lowest offsets on the page.
        int oldEndOffset = getSlotOffset(slotsInUse - 1);
        int newEndOffset = getSlotOffset(slotsInUse - 2);

        if (slot != slotsInUse - 1)
        {
            // if not removing the last slot, need to shift

            // now shift all the slots logically down by one
            // from (slot+1 to slotsInUse-1) to (slot and slotsInUse-2)
            int length = getSlotOffset(slot) - oldEndOffset;

            System.arraycopy(
                pageData, oldEndOffset, pageData, newEndOffset, length);
        }

        // clear out the last slot
        clearSection(oldEndOffset, slotEntrySize);

        // mark the space as free after we have removed the slot
        // no need to keep the space reserved for rollback as this is only
        // called for purge.
        freeSpace += slotEntrySize;

        slotsInUse--;

        headerOutOfDate = true; // headerOutOfDate must be set after setDirty
                                // because isDirty maybe called unlatched
    }
2484
2485    /**
2486     * create the record header for the specific slot.
2487     * <p>
2488     * Create a new record header object, initialize it, and add it
2489     * to the array of cache'd record headers on this page. Finally return
2490     * reference to the initialized record header.
2491     *
2492     * @return The record header for the specific slot.
2493     *
2494     * @param slot return record header of this slot.
2495     *
2496     * @exception StandardException Standard exception policy.
2497     **/

2498    public StoredRecordHeader recordHeaderOnDemand(int slot)
2499    {
2500        StoredRecordHeader recordHeader =
2501            new StoredRecordHeader(pageData, getRecordOffset(slot));
2502
2503        setHeaderAtSlot(slot, recordHeader);
2504
2505        return recordHeader;
2506    }
2507
2508    /**************************************************************************
2509     * Record based routines.
2510     **************************************************************************
2511     */

2512
2513    /**
2514     * Is entire record on the page?
2515     * <p>
2516     *
2517     * @return true if the entire record at slot is on this page,
2518     * i.e, no overflow row or long columns.
2519     *
2520     * @param slot Check record at this slot.
2521     *
2522     * @exception StandardException Standard exception policy.
2523     **/

2524    public boolean entireRecordOnPage(int slot)
2525         throws StandardException
2526    {
2527        if (SanityManager.DEBUG)
2528        {
2529            SanityManager.ASSERT(isLatched());
2530        }
2531
2532        StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
2533
2534        if (recordHeader.hasOverflow())
2535            return false;
2536
2537        // the row chain does not overflow, we need to walk all the fields to
2538
// make sure they are not long columns.
2539

2540        try
2541        {
2542
2543            int offset = getRecordOffset(slot);
2544        
2545            if (SanityManager.DEBUG)
2546            {
2547                if (offset < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE))
2548                {
2549                    SanityManager.THROWASSERT(
2550                        "Incorrect offset. offset = " + offset +
2551                        ", offset should be < " +
2552                        "(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " +
2553                             (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) +
2554                        ", current slot = " + slot +
2555                        ", total slotsInUse = " + slotsInUse);
2556                }
2557
2558                SanityManager.ASSERT(recordHeader.getFirstField() == 0,
2559                     "Head row piece should start at field 0 but is not");
2560            }
2561
2562            int numberFields = recordHeader.getNumberFields();
2563
2564            // these reads are always against the page array
2565
ArrayInputStream lrdi = rawDataIn;
2566
2567            // position after the record header, at 1st column.
2568
lrdi.setPosition(offset + recordHeader.size());
2569        
2570            for (int i = 0; i < numberFields; i++)
2571            {
2572                int fieldStatus = StoredFieldHeader.readStatus(lrdi);
2573                if (StoredFieldHeader.isOverflow(fieldStatus))
2574                    return false;
2575
2576                int fieldLength =
2577                    StoredFieldHeader.readFieldDataLength(
2578                        lrdi, fieldStatus, slotFieldSize);
2579
2580                if (fieldLength != 0)
2581                    lrdi.setPosition(lrdi.getPosition() + fieldLength);
2582            }
2583        }
2584        catch (IOException JavaDoc ioe)
2585        {
2586            throw dataFactory.markCorrupt(
2587                StandardException.newException(
2588                    SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
2589        }
2590
2591        // we have examined all the fields on this page and none overflows
2592
return true;
2593    }
2594
2595    /**
2596     * Purge one row on an overflow page.
2597     * <p>
2598     * HeadRowHandle is the recordHandle pointing to the head row piece.
2599     * <p>
2600     *
2601     * @param slot slot number of row to purge.
2602     * @param headRowHandle recordHandle of the head row piece.
2603     * @param needDataLogged when true data is logged for purges otherwise just headers.
2604     *
2605     * @exception StandardException Standard exception policy.
2606     **/

2607    protected void purgeOverflowAtSlot(
2608    int slot,
2609    RecordHandle headRowHandle,
2610    boolean needDataLogged)
2611         throws StandardException
2612    {
2613        if (SanityManager.DEBUG)
2614        {
2615            SanityManager.ASSERT(isLatched());
2616            SanityManager.ASSERT(isOverflowPage());
2617        }
2618
2619        if ((slot < 0) || (slot >= slotsInUse))
2620        {
2621            throw StandardException.newException(
2622                    SQLState.DATA_SLOT_NOT_ON_PAGE);
2623        }
2624
2625        // TODO (mikem) - should a global scratch variable be used?
2626

2627        // this is an overflow page purge, no need to lock the head row (it
2628
// has already been locked, hopefully). No need to check for long rows
2629
// (they have already been deleted, hopefully).
2630
RawTransaction t = owner.getTransaction();
2631        int[] recordId = new int[1];
2632
2633        recordId[0] = getHeaderAtSlot(slot).getId();
2634
2635        owner.getActionSet().actionPurge(t, this, slot, 1, recordId, needDataLogged);
2636    }
2637
    /**
     * Purge the column chain that starts at overflowPageId, overflowRecordId.
     * <p>
     * Purge just the column chain that starts at the input address.
     * The long column chain is pointed at by a field in a row. The long
     * column is then chained as a sequence of "rows", the last column then
     * points to the next segment of the chain on each page.
     * Long column chains currently are only one row per page, so the next
     * slot of a row in a long column chain should always be the first slot.
     * <p>
     *
     * @param overflowPageId The page where the long column chain starts.
     * @param overflowRecordId The record id where long column chain starts.
     *
     * @exception StandardException Standard exception policy.
     **/
    private void purgeOneColumnChain(
    long overflowPageId,
    int overflowRecordId)
         throws StandardException
    {
        StoredPage pageOnColumnChain = null;
        // Set when owner.removePage() has taken ownership of the latch, so
        // the finally block must not unlatch the page a second time.
        boolean removePageHappened = false;

        try
        {
            // Walk the chain page by page until the terminating sentinel.
            while (overflowPageId != ContainerHandle.INVALID_PAGE_NUMBER)
            {
                // Latch the next column piece in the chain.
                pageOnColumnChain = getOverflowPage(overflowPageId);
                removePageHappened = false;

                if (pageOnColumnChain == null)
                {
                    if (SanityManager.DEBUG)
                        SanityManager.THROWASSERT(
                              "got null page following long column chain. " +
                                "Head column piece at " + getIdentity() +
                                " null page at " + overflowPageId);

                    // The column chain is broken. Don't bomb; stop purging
                    // this chain and let the caller move to the next field.
                    break;
                }

                // Long column pieces are expected at the first slot.
                int overflowSlotId = FIRST_SLOT_NUMBER;
                if (SanityManager.DEBUG)
                {
                    int checkSlot =
                        pageOnColumnChain.findRecordById(
                                overflowRecordId, FIRST_SLOT_NUMBER);

                    if (overflowSlotId != checkSlot)
                    {
                        SanityManager.THROWASSERT(
                            "Long column is not at the expected " +
                            FIRST_SLOT_NUMBER + " slot, instead at slot " +
                            checkSlot);
                    }

                    SanityManager.ASSERT(pageOnColumnChain.recordCount() == 1,
                         "long column page has > 1 record");
                }

                // Capture the pointer to the next piece of the chain BEFORE
                // the current page is removed.
                RecordHandle nextColumnPiece =
                    pageOnColumnChain.getNextColumnPiece(overflowSlotId);

                if (pageOnColumnChain.recordCount() == 1)
                {
                    // The column piece is the only record on the page:
                    // deallocate the whole page. removePage() unlatches it.
                    removePageHappened = true;
                    owner.removePage(pageOnColumnChain);
                }
                else
                {
                    // Unexpected (chains are one row per page); leave the
                    // page alone and just release the latch.
                    if (SanityManager.DEBUG)
                        SanityManager.THROWASSERT(
                          "page on column chain has more then one record" +
                          pageOnColumnChain.toString());

                    pageOnColumnChain.unlatch();
                    pageOnColumnChain = null;
                }

                // Chase the column chain pointer.
                if (nextColumnPiece != null)
                {
                    overflowPageId = nextColumnPiece.getPageNumber();
                    overflowRecordId = nextColumnPiece.getId();
                }
                else
                {
                    // Last piece reached: terminate the loop.
                    overflowPageId = ContainerHandle.INVALID_PAGE_NUMBER;
                }
            }
        }
        finally
        {
            // If an exception was raised before the page was removed or
            // unlatched above, make sure the latch is released here.
            if (!removePageHappened && pageOnColumnChain != null)
            {
                pageOnColumnChain.unlatch();
                pageOnColumnChain = null;
            }
        }
    }
2748
2749    /**
2750     * purge long columns chains which eminate from this page.
2751     * <p>
2752     * Purge all the long column chains emanating from the record on this slot
2753     * of this page. The headRowHandle is the record handle of the head row
2754     * piece of this row - if this page is the head row, then headRowHandle is
2755     * the record handle at the slot. Otherwise, headRowHandle points to a
2756     * row on a different page, i.e., the head page.
2757     * <p>
2758     *
2759     * @param t The raw transaction doing the purging.
2760     * @param slot The slot of the row to purge.
2761     * @param headRowHandle The RecordHandle of the head row.
2762     *
2763     *
2764     * @exception StandardException Standard exception policy.
2765     **/

2766    private void purgeColumnChains(
2767    RawTransaction t,
2768    int slot,
2769    RecordHandle headRowHandle)
2770         throws StandardException
2771    {
2772        try
2773        {
2774            StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
2775
2776            int numberFields = recordHeader.getNumberFields();
2777
2778            // these reads are always against the page array
2779
ArrayInputStream lrdi = rawDataIn;
2780
2781            // position the stream to just after record header.
2782
int offset = getRecordOffset(slot) + recordHeader.size();
2783            lrdi.setPosition(offset);
2784
2785            for (int i = 0; i < numberFields; i++)
2786            {
2787                int fieldStatus = StoredFieldHeader.readStatus(lrdi);
2788                int fieldLength =
2789                    StoredFieldHeader.readFieldDataLength(
2790                        lrdi, fieldStatus, slotFieldSize);
2791
2792                if (!StoredFieldHeader.isOverflow(fieldStatus))
2793                {
2794                    // skip this field, it is not an long column
2795
if (fieldLength != 0)
2796                        lrdi.setPosition(lrdi.getPosition() + fieldLength);
2797                    continue;
2798                }
2799                else
2800                {
2801
2802                    // Got an overflow field. The column value is the
2803
// <pageId, recordId> pair where the next column piece is
2804
// residing
2805

2806                    long overflowPageId =
2807                        CompressedNumber.readLong((InputStream)lrdi);
2808                    int overflowRecordId =
2809                        CompressedNumber.readInt((InputStream)lrdi);
2810
2811                    purgeOneColumnChain(overflowPageId, overflowRecordId);
2812                }
2813            }
2814        }
2815        catch (IOException JavaDoc ioe)
2816        {
2817            throw dataFactory.markCorrupt(
2818                StandardException.newException(
2819                    SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
2820        }
2821    }
2822
    /**
     * Purge all the overflow columns and overflow rows of the record at slot.
     * <p>
     * Purge all the overflow columns and overflow rows of the record at slot.
     * This is called by BasePage.purgeAtSlot, the head row piece is purged
     * there.
     * <p>
     *
     * @param t The raw transaction doing the purging.
     * @param slot The slot of the row to purge.
     * @param headRowHandle The RecordHandle of the head row.
     * @param needDataLogged when true data is logged for purges otherwise just headers.
     *
     * @exception StandardException Standard exception policy.
     **/
    protected void purgeRowPieces(
    RawTransaction t,
    int slot,
    RecordHandle headRowHandle,
    boolean needDataLogged)
         throws StandardException
    {
        if (SanityManager.DEBUG)
            SanityManager.ASSERT(isOverflowPage() == false,
                 "not expected to call purgeRowPieces on a overflow page");

        // First purge the long column chains which start on this page.
        purgeColumnChains(t, slot, headRowHandle);

        // Drive this loop from the head page: walk each "long" row piece
        // in the row chain.
        StoredRecordHeader recordHeader = getHeaderAtSlot(slot);

        while (recordHeader.hasOverflow())
        {
            // nextPageInRowChain is the page holding the next row piece.
            StoredPage nextPageInRowChain =
                getOverflowPage(recordHeader.getOverflowPage());

            if (nextPageInRowChain == null)
            {
                if (SanityManager.DEBUG)
                {
                    SanityManager.THROWASSERT(
                            "got null page following long row chain. " +
                            "Head row piece at " + getIdentity() + " slot " +
                            slot + " headRecord " + headRowHandle +
                            ". Broken row chain at " +
                            recordHeader.getOverflowPage() + ", " +
                            recordHeader.getOverflowId());

                }

                // The row chain is broken. Don't bomb, just stop purging.
                break;
            }

            try
            {
                int nextPageSlot =
                    getOverflowSlot(nextPageInRowChain, recordHeader);

                // First get rid of all long columns hanging off the next
                // row piece.
                nextPageInRowChain.purgeColumnChains(
                    t, nextPageSlot, headRowHandle);

                // Before purging the next row piece, grab its row header to
                // decide whether the loop must continue past it.
                recordHeader = nextPageInRowChain.getHeaderAtSlot(nextPageSlot);

                // Lastly, purge the next row piece. If it is the only thing
                // on the page, deallocate the whole page. This can be done
                // here because the page is deallocated in this transaction;
                // deferring to post-commit would require purging the row
                // piece first and also remembering the page time stamp.
                if (nextPageSlot == 0 && nextPageInRowChain.recordCount() == 1)
                {
                    // Overflow page whose last row was just accounted for:
                    // free the page now. Cannot be done at post commit
                    // because the head row is gone by then and cannot be
                    // locked to stabilize the row chain.
                    try
                    {
                        owner.removePage(nextPageInRowChain);
                    }
                    finally
                    {
                        // removePage guarantees to unlatch the page even if
                        // an exception is thrown; must not unlatch it again.
                        nextPageInRowChain = null;
                    }
                }
                else
                {
                    nextPageInRowChain.purgeOverflowAtSlot(
                        nextPageSlot, headRowHandle, needDataLogged);

                    nextPageInRowChain.unlatch();
                    nextPageInRowChain = null;
                }
            }
            finally
            {
                // Unlatch the next row piece before moving on to the next
                // page in the row chain (covers exception paths above).
                if (nextPageInRowChain != null)
                {
                    nextPageInRowChain.unlatch();
                    nextPageInRowChain = null;
                }
            }
        }
    }
2942
2943
    /**
     * Remove a column chain that may have been orphaned by an update.
     * <p>
     * Remove a column chain that may have been orphaned by an update. This
     * is executed as a post commit operation. This page is the head page of
     * the row which used to point to the column chain in question. The
     * location of the orphaned column chain is in the ReclaimSpace record.
     * <BR>
     * MT - latched. No lock will be gotten, the head record must already be
     * locked exclusive with no outstanding changes that can be rolled back.
     * <p>
     *
     * @param work object describing the chain to remove.
     * @param containerHdl open container handle to use to remove chain.
     *
     * @exception StandardException Standard exception policy.
     **/
    /* package */
    void removeOrphanedColumnChain(
    ReclaimSpace work,
    ContainerHandle containerHdl)
         throws StandardException
    {
        // First make sure that this is the first and only time this long
        // column is being reclaimed: get the first page on the long column
        // chain and compare its page time stamp. If it differs, someone
        // else got here first - do nothing.
        //
        // Next make sure the update operation committed: find the row
        // headed by headRowHandle, go to the column in question and see if
        // it still points to the first page of the long column chain. If it
        // does, the update rolled back and the chain must NOT be reclaimed.
        //
        // Only after both checks pass is the column chain purged.
        StoredPage headOfChain =
            (StoredPage)containerHdl.getPageNoWait(work.getColumnPageId());

        // If someone has it latched, not reclaimable.
        if (headOfChain == null)
            return;

        // If the column has been touched, it is not orphaned. Not reclaimable.
        boolean pageUnchanged =
            headOfChain.equalTimeStamp(work.getPageTimeStamp());

        headOfChain.unlatch(); // unlatch it for now.

        if (pageUnchanged == false)
            return;

        // Now get to the column in question and make sure it is no longer
        // pointing to the column chain.
        RecordHandle headRowHandle = work.getHeadRowHandle();

        if (SanityManager.DEBUG)
        {
            SanityManager.ASSERT(isLatched());
            SanityManager.ASSERT(
                headRowHandle.getPageNumber() == getPageNumber(),
                "got wrong head page");
        }

        // First get the row.
        int slot =
            findRecordById(
                headRowHandle.getId(), headRowHandle.getSlotNumberHint());

        // If slot < 0, the whole record is gone and the column chain is
        // definitely orphaned; fall through to the purge below.
        if (slot >= 0)
        {
            if (SanityManager.DEBUG)
            {
                if (isOverflowPage())
                {
                    SanityManager.THROWASSERT(
                        "Page " + getPageNumber() + " is overflow " +
                        "\nwork = " + work +
                        "\nhead = " + headOfChain +
                        "\nthis = " + this);
                }
            }

            // Find the row piece (page) with the column in question on it.
            StoredPage pageInRowChain = this; // Start with the head page.

            try
            {
                int columnId = work.getColumnId();
                StoredRecordHeader recordHeader = getHeaderAtSlot(slot);

                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(recordHeader.getFirstField() == 0,
                        "Head row piece should start at field 0 but is not");

                // Walk the row chain until the piece holding columnId is
                // latched in pageInRowChain.
                while ((recordHeader.getNumberFields() +
                        recordHeader.getFirstField()) <= columnId)
                {
                    // The column in question is not on pageInRowChain.
                    if (pageInRowChain != this)
                    {
                        // Keep the head page latched; release row pieces.
                        pageInRowChain.unlatch();
                        pageInRowChain = null;
                    }

                    if (recordHeader.hasOverflow())
                    {
                        // Go to the next row piece.
                        pageInRowChain =
                            getOverflowPage(recordHeader.getOverflowPage());
                        recordHeader =
                            pageInRowChain.getHeaderAtSlot(
                                getOverflowSlot(pageInRowChain, recordHeader));
                    }
                    else
                    {
                        // The row ends before columnId - the column chain is
                        // definitely orphaned. This can happen if the
                        // update, or subsequent updates, shrank the number
                        // of columns in the row.
                        break;
                    }
                }

                if ((recordHeader.getNumberFields() +
                            recordHeader.getFirstField()) > columnId)
                {
                    // recordHeader is the record header of the row piece on
                    // pageInRowChain; the column in question exists in it.
                    if (!pageInRowChain.isColumnOrphaned(
                            recordHeader, columnId,
                            work.getColumnPageId(), work.getColumnRecordId()))
                    {
                        // The column is not orphaned, row still points to
                        // it - do not reclaim.
                        if (pageInRowChain != this)
                        {
                            // Keep the head page latched.
                            pageInRowChain.unlatch();
                            pageInRowChain = null;
                        }
                        return;
                    }
                }

            }
            catch (IOException ioe)
            {
                throw StandardException.newException(
                        SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
            }
            finally
            {
                // Never leave an intermediate row piece latched.
                if (pageInRowChain != this && pageInRowChain != null)
                    pageInRowChain.unlatch();
            }
        }

        // Getting this far means the column chain is verified orphaned.
        // Get rid of it.
        long nextPageId = work.getColumnPageId();
        int nextRecordId = work.getColumnRecordId();

        purgeOneColumnChain(nextPageId, nextRecordId);
    }
3123
3124    /**
3125     * See if there is a orphaned long colum chain or not.
3126     * <p>
3127     * See if there is a orphaned long colum chain or not. This is a helper
3128     * function for removeOrphanedChain. This page, which may be a head page
3129     * or overflow page, contains the column specified in columnId. It used to
3130     * point to a long column chain at oldPageId and oldRecordId. Returns true
3131     * if it no longer points to that long column chain.
3132     * <p>
3133     *
3134     * @return true if page no longer points to the long column chain.
3135     *
3136     * @param recordHeader record header which used to point at the long column
3137     * @param columnId column id of the long column in head.
3138     * @param oldPageId the page id where the long column used to be.
3139     * @param oldRecordId the record id where the long column used to be.
3140     *
3141     * @exception StandardException Standard exception policy.
3142     **/

3143    private boolean isColumnOrphaned(
3144    StoredRecordHeader recordHeader,
3145    int columnId,
3146    long oldPageId,
3147    long oldRecordId)
3148         throws StandardException, IOException JavaDoc
3149    {
3150        int slot = findRecordById(recordHeader.getId(), Page.FIRST_SLOT_NUMBER);
3151
3152        if (SanityManager.DEBUG)
3153        {
3154            SanityManager.ASSERT(slot >= 0, "overflow row chain truncated");
3155
3156            SanityManager.ASSERT(
3157                columnId >= recordHeader.getFirstField(),
3158                "first column on page > expected");
3159        }
3160
3161        // these reads are always against the page array
3162
ArrayInputStream lrdi = rawDataIn;
3163
3164        // set read position to data portion of record to check.
3165
int offset = getRecordOffset(slot);
3166        lrdi.setPosition(offset + recordHeader.size());
3167
3168        // skip until you get to the record in question.
3169
for (int i = recordHeader.getFirstField(); i < columnId; i++)
3170            skipField(lrdi);
3171
3172        // read in the info of the column we are interested in.
3173
int fieldStatus = StoredFieldHeader.readStatus(lrdi);
3174        int fieldLength = StoredFieldHeader.readFieldDataLength
3175                (lrdi, fieldStatus, slotFieldSize);
3176
3177        if (StoredFieldHeader.isOverflow(fieldStatus))
3178        {
3179            // it is still an overflow field, check if it still points to
3180
// overflow column in question.
3181

3182            long ovflowPage = CompressedNumber.readLong((InputStream) lrdi);
3183            int ovflowRid = CompressedNumber.readInt((InputStream) lrdi);
3184
3185            if (ovflowPage == oldPageId && ovflowRid == oldRecordId)
3186            {
3187                // This field still points to the column chain, the
3188
// update must have rolled back.
3189
return false;
3190            }
3191        }
3192
3193        // Else, either the field is no longer a long column, or it doesn't
3194
// point to oldPageId, oldRecordId. The column chain is orphaned.
3195
return true;
3196    }
3197
    /**
     * Return the next recordHandle in a long column chain.
     * <p>
     * Return a recordHandle pointing to the next piece of the column chain.
     * This page must be an overflow page that is in a column chain. If this
     * is the last piece of the overflow column, return null.
     * <p>
     *
     * @return The next record handle in a long column chain, or null at the
     * end of the chain.
     *
     * @param slot The slot of the current long column piece.
     *
     * @exception StandardException Standard exception policy.
     **/
    private RecordHandle getNextColumnPiece(int slot)
        throws StandardException
    {
        if (SanityManager.DEBUG)
        {
            SanityManager.ASSERT(isLatched());
            SanityManager.ASSERT(isOverflowPage(),
                "not expected to call getNextColumnPiece on non-overflow page");

            if (recordCount() != 1)
            {
                SanityManager.THROWASSERT(
                    "getNextColumnPiece called on a page with " +
                    recordCount() + " rows");
            }
        }

        try
        {
            StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
            int numberFields =
                recordHeader.getNumberFields();

            // A long column piece has 1 field (data only, end of chain) or
            // 2 fields (data plus a pointer to the next piece).
            if (SanityManager.DEBUG)
            {
                if ((numberFields > 2) || (numberFields < 1))
                {
                    SanityManager.THROWASSERT(
                        "longColumn record header must have 1 or 2 fields." +
                        " numberFields = " + numberFields);
                }
            }

            // Only 1 field: end of the column chain.
            if (numberFields != 2)
                return null;

            // These reads are always against the page byte array.
            ArrayInputStream lrdi = rawDataIn;

            // The 2nd field is the pointer to the next page in the column
            // chain. Position at the first field's header.
            int offset = getRecordOffset(slot) + recordHeader.size();
            lrdi.setPosition(offset);

            // Skip the first field (the column data itself).
            skipField(lrdi);

            // The 2nd field is a <pageId, recordId> pair; reading the field
            // data length advances the stream to that pair.
            int fieldStatus = StoredFieldHeader.readStatus(lrdi);
            int fieldLength = StoredFieldHeader.readFieldDataLength
                (lrdi, fieldStatus, slotFieldSize);

            long ovflowPage = CompressedNumber.readLong((InputStream) lrdi);
            int ovflowRid = CompressedNumber.readInt((InputStream) lrdi);

            if (SanityManager.DEBUG)
            {
                if (!StoredFieldHeader.isOverflow(fieldStatus))
                {
                    // In version 1.5, the first field is overflow and the
                    // second is not. In version 2.0 onwards, the first field
                    // is not overflow and the second is overflow (the
                    // overflow bit goes with the overflow pointer). Check
                    // the first field to make sure its overflow bit is set.
                    // offset still points to the first column.
                    lrdi.setPosition(offset);
                    fieldStatus = StoredFieldHeader.readStatus(lrdi);
                    SanityManager.ASSERT(
                            StoredFieldHeader.isOverflow(fieldStatus));
                }
            }

            // RESOLVE: this new can get expensive if the column chain is
            // very long. The reason we do this is because we need to return
            // the page number and the rid; if we assume that the long
            // column is always at slot 0, we can return only the page.
            return owner.makeRecordHandle(ovflowPage, ovflowRid);

        }
        catch (IOException ioe)
        {
            throw dataFactory.markCorrupt(
                StandardException.newException(
                    SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
        }
    }
3309     
3310
3311    /**************************************************************************
3312     * Page space usage
3313     **************************************************************************
3314     */

3315
    /**
     * Initialize the in-memory variables associated with space maintenance.
     * <p>
     * Gets the total available space on an empty page.
     * initSlotTable() must be called after the page has been read in.
     **/
    private void initSpace()
    {
        // NOTE: a subclass may have overridden getMaxFreeSpace() to report
        // less free space, so always call it to get the total space.
        totalSpace = getMaxFreeSpace();

        // Estimate a record header (RH) will be about 16 bytes:
        // (1 - status, 1 - id, 1 - #fields, 1 - 1stField, 12 - overflow ptr)

        // RESOLVED: track# 3370, 3368
        // In the old code below, spareSpace/100 is integer division. This means
        // that you get a value of 0 for it as long as spareSpace is between 0
        // and 99. But if spareSpace is 100 you get a value of 1. This resulted
        // in a negative value for maxFieldSize. This caused e.g. the isLong
        // method to behave incorrectly when spareSpace is 100.
        //
        // RESOLVED: track# 4385
        // maxFieldSize is a worst case calculation for the size of a record
        // on an empty page, with a single field, but still allowing room for
        // an overflow pointer if another field is to be added. If you don't
        // account for the overflow pointer then you can get into the situation
        // where the code puts the field on the page (not making it a long
        // column), then runs out of space on the next column but can't fit the
        // overflow pointer, so backs up and removes the column from the page,
        // and tries again on the next overflow page - looping forever.
        //
        // Old (buggy) formula, kept for reference:
        //   maxFieldSize =
        //       totalSpace * (1 - spareSpace/100) - slotEntrySize
        //       - 16 - OVERFLOW_POINTER_SIZE;

        maxFieldSize = totalSpace - slotEntrySize - 16 - OVERFLOW_POINTER_SIZE;

        if (SanityManager.DEBUG)
            SanityManager.ASSERT(maxFieldSize >= 0);
    }
3357
3358    /**
3359     * Initialize the freeSpace count and set the firstFreeByte on page
3360     **/

3361    private void clearAllSpace()
3362    {
3363        freeSpace = totalSpace;
3364        firstFreeByte = getPageSize() - totalSpace - CHECKSUM_SIZE;
3365    }
3366
    /**
     * Compress out the space specified by startByte and endByte.
     * <p>
     * As part of moving rows, updating rows, and purging rows, compact the
     * space left between rows. Everything above the compressed hole is
     * shifted down, affected slot-table offsets are adjusted, and the
     * reclaimed bytes at the new firstFreeByte are zeroed.
     * <p>
     *
     * @param startByte compress out space starting at startByte offset
     * @param endByte   compress out space ending at endByte offset (inclusive)
     *
     **/
    private void compressPage(
    int startByte,
    int endByte)
        throws IOException
    {
        if (SanityManager.DEBUG)
        {
            // The range being compressed must lie entirely within the
            // currently occupied portion of the page.
            if (((endByte + 1) > firstFreeByte) || (startByte > firstFreeByte))
            {
                SanityManager.THROWASSERT(
                    "startByte = " + startByte + " endByte = " + endByte +
                    " firstFreeByte = " + firstFreeByte);
            }
        }

        int lengthToClear = endByte + 1 - startByte;

        // See if these were not the last occupied record space on the page;
        // if they were the last, there is nothing to shift.
        if ((endByte + 1) != firstFreeByte)
        {
            // Shift everything down the page (toward lower offsets).
            // System.arraycopy copies overlapping ranges as if through an
            // intermediate buffer, so the in-place shift is safe.
            int moveLength = (firstFreeByte - endByte - 1);

            System.arraycopy(
                pageData, (endByte + 1), pageData, startByte, moveLength);

            // Fix the page offsets of the rows further down the page:
            // any record that lived above the hole moved down by
            // lengthToClear bytes.
            for (int slot = 0; slot < slotsInUse; slot++)
            {
                int offset = getRecordOffset(slot);

                if (offset >= (endByte + 1))
                {
                    offset -= lengthToClear;
                    setRecordOffset(slot, offset);
                }
            }
        }
        
        // Account for the reclaimed bytes and zero them out so stale data
        // does not linger in the free region.
        freeSpace += lengthToClear;
        firstFreeByte -= lengthToClear;

        clearSection(firstFreeByte, lengthToClear);
    }
3422
    /**
     * Free up required bytes by shifting rows "up" the page.
     * <p>
     * Expand page: move all the data from startOffset up the page (toward
     * higher offsets) by the amount required to free up the required bytes,
     * then adjust the affected slot-table offsets and the space counters.
     *
     * @param startOffset   offset on page to begin the shift
     * @param requiredBytes the number of bytes that must be freed.
     *
     * @exception IOException If IOException is raised during the page mod.
     **/
    protected void expandPage(
    int startOffset,
    int requiredBytes)
        throws IOException
    {
        if (SanityManager.DEBUG)
        {
            // Caller must have verified there is enough free space, and the
            // shift must start within the occupied portion of the page.
            SanityManager.ASSERT(requiredBytes <= freeSpace);
            SanityManager.ASSERT(startOffset <= firstFreeByte);
        }

        // Number of occupied bytes that have to move.
        int totalLength = firstFreeByte - startOffset;

        if (totalLength > 0)
        {
            // Overlapping in-place shift toward higher offsets;
            // System.arraycopy handles the overlap correctly.
            System.arraycopy(
                pageData, startOffset,
                pageData, startOffset + requiredBytes, totalLength);

            // Fix the page offsets of the rows further down the page:
            // every record at or above startOffset moved up by requiredBytes.
            for (int slot = 0; slot < slotsInUse; slot++)
            {
                int offset = getRecordOffset(slot);
                if (offset >= startOffset)
                {
                    offset += requiredBytes;
                    setRecordOffset(slot, offset);
                }
            }
        }

        // The freed hole now sits at startOffset; update the counters.
        freeSpace -= requiredBytes;
        firstFreeByte += requiredBytes;
    }
3468
    /**
     * Shrink page.
     * <p>
     * Move all the data from startOffset up the page (toward lower offsets)
     * by shrinkBytes, reclaiming that many bytes as free space and adjusting
     * the affected slot-table offsets.
     *
     * @param startOffset offset on page to begin the shift
     * @param shrinkBytes the number of bytes that must be moved.
     *
     * @exception IOException some IOException is raised during the page mod,
     *                        (unlikely as this is just writing to array).
     **/
    private void shrinkPage(int startOffset, int shrinkBytes)
         throws IOException
    {
        // The number of occupied bytes that need to be moved up.
        int totalLength = firstFreeByte - startOffset;

        if (SanityManager.DEBUG)
        {
            SanityManager.DEBUG(
                "shrinkPage", "page " + getIdentity() +
                " shrinking " + shrinkBytes +
                " from offset " + startOffset +
                " to offset " + (startOffset-shrinkBytes) +
                " moving " + totalLength +
                " bytes. FirstFreeByte at " + firstFreeByte);

            // startOffset must lie within the occupied portion of the page.
            SanityManager.ASSERT(
                totalLength >= 0, "firstFreeByte - startOffset <= 0");

            // The shift must not move data below the start of record space.
            SanityManager.ASSERT(
                (startOffset-shrinkBytes) > RECORD_SPACE_OFFSET ,
                "shrinking too much ");

            if (startOffset != firstFreeByte)
            {
                // Make sure startOffset is at the beginning of a record;
                // shifting from mid-record would corrupt the page.
                boolean foundslot = false;
                for (int slot = 0; slot < slotsInUse; slot++)
                {
                    if (getRecordOffset(slot) == startOffset)
                    {
                        foundslot = true;
                        break;
                    }
                }

                if (!foundslot)
                {
                    SanityManager.THROWASSERT(
                        "startOffset " + startOffset +
                        " not at the beginning of a record");
                }
            }
        }

        if (totalLength > 0)
        {
            // Overlapping in-place shift toward lower offsets;
            // System.arraycopy handles the overlap correctly.
            System.arraycopy(
                pageData, startOffset,
                pageData, startOffset-shrinkBytes , totalLength);

            // Fix the page offsets of the rows further down the page:
            // every record at or above startOffset moved down by shrinkBytes.
            for (int slot = 0; slot < slotsInUse; slot++)
            {
                int offset = getRecordOffset(slot);
                if (offset >= startOffset)
                {
                    offset -= shrinkBytes;
                    setRecordOffset(slot, offset);
                }
            }
        }

        // Account for the reclaimed bytes.
        freeSpace += shrinkBytes;
        firstFreeByte -= shrinkBytes;
    }
3547
3548    public int getRecordLength(int slot) throws IOException JavaDoc
3549    {
3550        return getRecordPortionLength(slot);
3551    }
3552    protected boolean getIsOverflow(int slot) throws IOException JavaDoc
3553    {
3554        return getHeaderAtSlot(slot).hasOverflow();
3555    }
3556
3557    /**
3558        Log a row into the store output stream.
3559
3560        <P>
3561
3562        @exception StandardException Standard Cloudscape error policy
3563        @exception IOException RESOLVE
3564
3565    */

3566    /**
3567     * Log a row into the store output stream.
3568     * <p>
3569     * Write the row in its record format to the stream. Record format is a
3570     * record header followed by each field with its field header. See this
3571     * class's description for the specifics of these headers.
3572     *
3573     * startColumn is used to specified which column for this logRow to
3574     * start logging. When realStartColumn is specified, that means part of
3575     * the row has already been logged. startColumn here indicates that the
3576     * first column was logged in the logBuffer, need to continue log the rest
3577     * of the row starting at realStartColumn.
3578     *
3579     * This is used when a longColumn is encountered during a long row.
3580     * After done logging the long column, we need to continue logging the
3581     * rest of the row.
3582     * A -1 value for realStartColumn, means that it is not significant.
3583     *
3584     * logRow will not throw a noSpaceOnPage exception if it is an overflow
3585     * page, and the record we are inserting is the only record on the page.
3586     * We are supporting rows expanding multiple pages through this mechanism.
3587     *
3588     * logRow expects row to be a sparse row.
3589     * <p>
3590     *
3591     * @return the "realStartColumn" value, -1 if not a long row.
3592     *
3593     * @param slot the slot of the row being logged.
3594     * @param forInsert this is logging an insert (not update/delete).
3595     * @param recordId record id of the row being logged.
3596     * @param row actual data of row in object form. If row is
3597     * null then we are logging an overflow pointer.
3598     * @param validColumns bit map describing valid columns in row.
3599     * @param out stream to log to.
3600     * @param startColumn what column to start with (see above for detail)
3601     * @param insertFlag flag indicating mode we are in,
3602     * INSERT_DEFAULT - default insert
3603     * INSERT_SPLIT - splitting a row/column
3604     * across pages.
3605     * @param realStartColumn If -1 ignore variable, else part of row has
3606     * already been logged, and should continue with
3607     * this column.
3608     * @param realSpaceOnPage Use this as space on page if realStartColumn
3609     * is not -1.
3610     * @param overflowThreshold How much of the page to use before deciding
3611     * to overflow a row.
3612     *
3613     * @exception IOException RESOLVE
3614     * @exception StandardException Standard exception policy.
3615     *
3616     * @see BasePage#logRow
3617     **/

3618    public int logRow(
3619    int slot,
3620    boolean forInsert,
3621    int recordId,
3622    Object JavaDoc[] row,
3623    FormatableBitSet validColumns,
3624    DynamicByteArrayOutputStream out,
3625    int startColumn,
3626    byte insertFlag,
3627    int realStartColumn,
3628    int realSpaceOnPage,
3629    int overflowThreshold)
3630        throws StandardException, IOException JavaDoc
3631    {
3632        // Is this an update that just went through handleIncompleteLogRow
3633
// and handleIncompleteLogRow threw an excepiton. In this case the
3634
// buffer is already finished.
3635
if (!forInsert)
3636        {
3637            if ((realStartColumn != -1) && (realSpaceOnPage == -1))
3638            {
3639                return realStartColumn;
3640            }
3641        }
3642
3643        int spaceAvailable = freeSpace;
3644        setOutputStream(out);
3645        int beginPosition = out.getPosition();
3646
3647        // if we are inserting in the headPage,
3648
// we need to make sure that there is enough room
3649
// on the page for the reserve space.
3650
userRowSize = 0;
3651        boolean calcMinimumRecordSize = false;
3652
3653        if (realStartColumn != (-1))
3654        {
3655            // in the middle of logging a long row/column.
3656

3657            spaceAvailable = realSpaceOnPage;
3658            beginPosition = out.getBeginPosition();
3659        }
3660        else
3661        {
3662            // logging row part that is on head page.
3663

3664            if (!forInsert)
3665            {
3666                // an update can use the total space of the record,
3667
// even if not all of the fields are being updated.
3668
// If the updated columns will not fit then some
3669
// columns will move off the page to a new chunk.
3670
spaceAvailable += getTotalSpace(slot);
3671
3672            }
3673            else
3674            {
3675                // need to account for the slot table using extra space...
3676
spaceAvailable -= slotEntrySize;
3677
3678                if (startColumn == 0)
3679                    calcMinimumRecordSize = true;
3680            }
3681
3682            // <= is ok here as we know we want to write at least one more byte
3683
if (spaceAvailable <= 0)
3684                throw new NoSpaceOnPage(isOverflowPage());
3685        }
3686
3687        try
3688        {
3689            if (row == null)
3690            {
3691                // if the row is null, we must be writing an overflow pointer.
3692

3693                return(logOverflowRecord(slot, spaceAvailable, out));
3694            }
3695
3696            int numberFields = 0;
3697            StoredRecordHeader recordHeader;
3698
3699            if (forInsert)
3700            {
3701                recordHeader = new StoredRecordHeader();
3702            }
3703            else
3704            {
3705                // Get a full copy of the record header since we might change
3706
// it, and we can't modify the one on the page
3707
recordHeader =
3708                    new StoredRecordHeader(getHeaderAtSlot(slot));
3709
3710                // an update always starts at the first column on this page
3711
startColumn = recordHeader.getFirstField();
3712            }
3713
3714            if (validColumns == null)
3715            {
3716                // all columns in row[] are valid, we will be logging them all.
3717

3718                numberFields = row.length - startColumn;
3719            }
3720            else
3721            {
3722                // RESOLVE (mikem) - counting on validColumns.length may be bad
3723
// for performance.
3724

3725                for (int i = validColumns.getLength() - 1;
3726                     i >= startColumn;
3727                     i--)
3728                {
3729                    if (validColumns.isSet(i))
3730                    {
3731                        numberFields = i + 1 - startColumn;
3732                        break;
3733                    }
3734                }
3735            }
3736
3737            int onPageNumberFields = -1; // only valid for update
3738

3739            if (forInsert)
3740            {
3741                recordHeader.setId(recordId);
3742                recordHeader.setNumberFields(numberFields);
3743            }
3744            else
3745            {
3746                // an update
3747

3748                onPageNumberFields = recordHeader.getNumberFields();
3749
3750                if (numberFields > onPageNumberFields)
3751                {
3752                    // number of fields *might* be increasing
3753
if (recordHeader.hasOverflow())
3754                    {
3755                        // other fields will be handled in next portion update
3756

3757                        numberFields = onPageNumberFields;
3758                    }
3759                    else
3760                    {
3761                        // number of fields is increasing
3762

3763                        recordHeader.setNumberFields(numberFields);
3764                    }
3765                }
3766                else if (numberFields < onPageNumberFields)
3767                {
3768                    if (validColumns == null)
3769                    {
3770                        // number of fields is decreasing,
3771
// but only allowed when the complete
3772
// row is being updated.
3773
recordHeader.setNumberFields(numberFields);
3774
3775                        // RESOLVE -
3776
// need some post commit work if row has overflow
3777

3778                        // if (recordHeader.hasOverflow()) {
3779
// remove overflow portion after commit.
3780
// }
3781

3782                    }
3783                    else
3784                    {
3785                        // we process all the fields, the unchanged ones
3786
// at the end will have a single byte written out
3787
// indicating they are unchanged (nonexistent)
3788
numberFields = onPageNumberFields;
3789                    }
3790                }
3791            }
3792
3793            int endFieldExclusive = startColumn + numberFields;
3794
3795            if (realStartColumn >= endFieldExclusive)
3796            {
3797                // The realStartColumn is greater than the last column we need
3798
// to log, so we are done.
3799
return (-1);
3800            }
3801
3802            if ((insertFlag & Page.INSERT_DEFAULT) != Page.INSERT_DEFAULT)
3803            {
3804                // if this is not logging the part of the row being inserted
3805
// on the main page, then use startColumn as first field.
3806
recordHeader.setFirstField(startColumn);
3807            }
3808
3809            // what column to start with?
3810

3811            int firstColumn = realStartColumn;
3812            if (realStartColumn == (-1))
3813            {
3814                // logging on the head page.
3815

3816                int recordHeaderLength = recordHeader.write(logicalDataOut);
3817
3818                spaceAvailable -= recordHeaderLength;
3819                if (spaceAvailable < 0)
3820                {
3821                    // ran out of space just writing the record header.
3822
throw new NoSpaceOnPage(isOverflowPage());
3823                }
3824
3825                firstColumn = startColumn;
3826            }
3827
3828
3829            boolean monitoringOldFields = false;
3830            int validColumnsSize =
3831                (validColumns == null) ? 0 : validColumns.getLength();
3832            
3833            if (validColumns != null)
3834            {
3835                if (!forInsert)
3836                {
3837                    // we monitor the length of the old fields by skipping them
3838
// but only on a partial update.
3839
if ((validColumns != null) &&
3840                        (firstColumn < (startColumn + onPageNumberFields)))
3841                    {
3842                        rawDataIn.setPosition(
3843                            getFieldOffset(slot, firstColumn));
3844
3845                        monitoringOldFields = true;
3846                    }
3847                }
3848            }
3849
3850            int lastSpaceAvailable = spaceAvailable;
3851            int recordSize = 0;
3852            int lastColumnPositionAllowOverflow = out.getPosition();
3853            int lastColumnAllowOverflow = startColumn;
3854
3855            if (spaceAvailable > OVERFLOW_POINTER_SIZE)
3856                lastColumnPositionAllowOverflow = -1;
3857            int columnFlag = COLUMN_FIRST;
3858
3859            for (int i = firstColumn; i < endFieldExclusive; i++)
3860            {
3861                Object JavaDoc ref = null;
3862                boolean ignoreColumn = false;
3863
3864
3865                // should we log this column or not?
3866
if ((validColumns == null) ||
3867                    (validColumnsSize > i && validColumns.isSet(i)))
3868                {
3869                    if (i < row.length)
3870                        ref = row[i];
3871                }
3872                else if (!forInsert)
3873                {
3874                    // field is not supplied, log as non-existent
3875
ignoreColumn = true;
3876                }
3877
3878                if (spaceAvailable > OVERFLOW_POINTER_SIZE)
3879                {
3880                    lastColumnPositionAllowOverflow = out.getPosition();
3881                    lastColumnAllowOverflow = i;
3882                }
3883
3884                lastSpaceAvailable = spaceAvailable;
3885
3886                if (ignoreColumn)
3887                {
3888                    if (SanityManager.DEBUG)
3889                    {
3890                        SanityManager.ASSERT(
3891                            ref == null,
3892                            "ref should be null for an ignored column");
3893
3894                        SanityManager.ASSERT(
3895                            validColumns != null,
3896                            "validColumns should be non-null for ignored col");
3897                    }
3898
3899                    if (i < (startColumn + onPageNumberFields))
3900                    {
3901                        if (SanityManager.DEBUG)
3902                        {
3903                            SanityManager.ASSERT(
3904                                monitoringOldFields,
3905                                "monitoringOldFields must be true");
3906                        }
3907
3908                        // need to keep track of the old field lengths
3909
// as they are remaining in the row.
3910
int oldOffset = rawDataIn.getPosition();
3911                        skipField(rawDataIn);
3912                        int oldFieldLength =
3913                            rawDataIn.getPosition() - oldOffset;
3914
3915                        if (oldFieldLength <= spaceAvailable)
3916                        {
3917                            // if field doesn't fit,
3918
// spaceAvailable must be left unchanged.
3919

3920                            logColumn(
3921                                null, 0, out, Integer.MAX_VALUE,
3922                                COLUMN_NONE, overflowThreshold);
3923
3924                            spaceAvailable -= oldFieldLength;
3925                        }
3926
3927                    }
3928                    else
3929                    {
3930                        // this is an update that is increasing the number of
3931
// columns but not providing any value, strange ...
3932

3933                        spaceAvailable =
3934                            logColumn(
3935                                null, 0, out, spaceAvailable,
3936                                columnFlag, overflowThreshold);
3937                    }
3938
3939                }
3940                else
3941                {
3942                    // ignoreColumn is false, we are logging this column.
3943

3944                    if (monitoringOldFields &&
3945                        (i < (startColumn + onPageNumberFields)))
3946                    {
3947                        // skip the old version of the field so that
3948
// rawDataIn is correctly positioned.
3949
skipField(rawDataIn);
3950                    }
3951
3952
3953                    try
3954                    {
3955                        if (ref == null)
3956                        {
3957                            // no new value to provide, use the on page value.
3958
spaceAvailable =
3959                                logColumn(
3960                                    null, 0, out, spaceAvailable,
3961                                    columnFlag, overflowThreshold);
3962                        }
3963                        else
3964                        {
3965                            // log the value provided in the row[i]
3966
spaceAvailable =
3967                                logColumn(
3968                                    row, i, out, spaceAvailable,
3969                                    columnFlag, overflowThreshold);
3970                        }
3971
3972                    }
3973                    catch (LongColumnException lce)
3974                    {
3975                        // logColumn determined that the column would not fit
3976
// and that the column length exceeded the long column
3977
// threshold so turn this column into a long column.
3978

3979
3980                        if ((insertFlag & Page.INSERT_DEFAULT) ==
3981                                Page.INSERT_DEFAULT)
3982                        {
3983                            // if default insert, just throw no space exception.
3984

3985                            // if the lce has throw the column as an InputStream,
3986
// in the following 2 situations
3987
// 1. If column came in 'row[i]' as InputStream
3988
// 2. If the object stream of 'row[i]' is not
3989
// null, which means that the object state of
3990
// the column is null.
3991
//
3992
// we need to set the original InputStream column to
3993
// the column that has been thrown by lce. It is a
3994
// store formated InputStream which remembers all
3995
// the bytes that has been read, but not yet stored.
3996
// Therefore, we will not lose any bytes.
3997
//
3998
// In any other situation, we should not change the
3999
// state of the column,
4000
// i.e. if 'row[i]' has an object state, it should
4001
// not be turned into an InputStream.
4002

4003                            if ((lce.getColumn() instanceof InputStream)
4004                                    && (row[i] instanceof StreamStorable) )
4005                            {
4006                                if ((row[i] instanceof InputStream) ||
4007                                    (((StreamStorable) row[i]).returnStream()
4008                                         != null) )
4009                                {
4010                                    // change state of stream so that it uses
4011
// the stream just created by the lce -
4012
// which is remembering the bytes it has
4013
// already read from the stream but couldn't
4014
// log as there was not enough room on
4015
// current page.
4016

4017                                    ((StreamStorable) row[i]).setStream(
4018                                                (InputStream) lce.getColumn());
4019                                }
4020                            }
4021
4022                            throw new NoSpaceOnPage(isOverflowPage());
4023                        }
4024
4025                        // When one of the following two conditions is true,
4026
// we will allow the insert of the long column:
4027
//
4028
// 1. if this is the last field,
4029
// and overflow field header fits on page.
4030
// 2. if it is not the last field,
4031
// and overflow field header fits on page (for col)
4032
// and another overflow ptr fits (for row).
4033
//
4034
//
4035

4036                        if (((spaceAvailable >= OVERFLOW_PTR_FIELD_SIZE) &&
4037                             (i == (endFieldExclusive - 1))) ||
4038                            ((spaceAvailable >= (OVERFLOW_PTR_FIELD_SIZE * 2))&&
4039                             (i < (endFieldExclusive - 1))))
4040                        {
4041                            // If the column is a long column, it must be a
4042
// InputStream. We have made the input stream into
4043
// a RememberBytesInputStream, have to set the
4044
// column to that, in order to preserve the bytes
4045
// we already read off the stream.
4046

4047                            // caught a long column exception,
4048
// set the variables, and rethrow the error
4049
out.setBeginPosition(beginPosition);
4050                            lce.setExceptionInfo(out, i, spaceAvailable);
4051                            throw (lce);
4052                        }
4053                    }
4054                }
4055
4056                int nextColumn;
4057
4058                recordSize += (lastSpaceAvailable - spaceAvailable);
4059                boolean recordIsLong =
4060                    (overflowThreshold == 100) ?
4061                        false : isLong(recordSize, overflowThreshold);
4062
4063                // get the no overflow case out of the way asap
4064
if ((lastSpaceAvailable == spaceAvailable) || recordIsLong)
4065                {
4066                    if ((insertFlag & Page.INSERT_DEFAULT) ==
4067                            Page.INSERT_DEFAULT)
4068                    {
4069                        throw new NoSpaceOnPage(isOverflowPage());
4070                    }
4071
4072                    if (recordIsLong)
4073                    {
4074                        // if the record is long because of threshold,
4075
// then, we need to reset the logicalOut.
4076
// set position to the end of the previous field
4077

4078                        out.setPosition(out.getPosition() - recordSize);
4079                    }
4080
4081                    // did not write this column
4082
nextColumn = i;
4083                }
4084                else
4085                {
4086                    // assume that all fields will be written to this page.
4087
nextColumn = endFieldExclusive;
4088                }
4089
4090                // See if we have enough room to write an overflow field if the
4091
// row needs to overflow. We need overflow if we need to
4092
// write another portion or another portion already exists and
4093
// we will need to point to it.
4094

4095                if ((lastSpaceAvailable == spaceAvailable) ||
4096                    ((insertFlag & Page.INSERT_FOR_SPLIT) ==
4097                         Page.INSERT_FOR_SPLIT))
4098                {
4099                    // The current row has filled the page.
4100

4101                    if (spaceAvailable <= OVERFLOW_POINTER_SIZE)
4102                    {
4103                        if ((i == startColumn) ||
4104                            (lastColumnPositionAllowOverflow < 0))
4105                        {
4106                            // not enough room for the overflow recordheader,
4107
// and this is the first column on this page so
4108
// need to try another page.
4109
throw new NoSpaceOnPage(isOverflowPage());
4110                        }
4111                        else
4112                        {
4113                            // we need to go back to the last column
4114
// that left enough room for an overflow pointer.
4115

4116                            out.setPosition(lastColumnPositionAllowOverflow);
4117                            nextColumn = lastColumnAllowOverflow;
4118                        }
4119                    }
4120                }
4121
4122                if (nextColumn < endFieldExclusive)
4123                {
4124                    // If the number of cols has been reduced.
4125

4126                    int actualNumberFields = nextColumn - startColumn;
4127
4128                    // go back and update that numberFields in recordHeader.
4129
// no need to update spaceAvailable here, because if we are
4130
// here, we will be returning any way, and spaceAvailable
4131
// will be thrown away.
4132

4133                    int oldSize = recordHeader.size();
4134                    recordHeader.setNumberFields(actualNumberFields);
4135
4136                    int newSize = recordHeader.size();
4137                    
4138                    // now we are ready to write the new record header.
4139
int endPosition = out.getPosition();
4140
4141                    if (oldSize > newSize)
4142                    {
4143                        // if the old size is bigger than the new size, then
4144
// leave extra bytes at the beginning of byte stream.
4145

4146                        int delta = oldSize - newSize;
4147                        out.setBeginPosition(beginPosition + delta);
4148                        out.setPosition(beginPosition + delta);
4149                    }
4150                    else if (newSize > oldSize)
4151                    {
4152                        out.setPosition(beginPosition);
4153
4154                    }
4155                    else
4156                    {
4157                        out.setBeginPosition(beginPosition);
4158                        out.setPosition(beginPosition);
4159                    }
4160
4161                    int realLen = recordHeader.write(logicalDataOut);
4162                    if (SanityManager.DEBUG)
4163                    {
4164                        if ((realLen + (oldSize - newSize)) != oldSize)
4165                        {
4166                            SanityManager.THROWASSERT(
4167                                "recordHeader size incorrect. realLen = " +
4168                                realLen + ", delta = " +
4169                                (oldSize - newSize) + ", oldSize = " + oldSize);
4170                        }
4171                    }
4172
4173                    out.setPosition(endPosition);
4174
4175                    if (!forInsert)
4176                    {
4177                        // The update is incomplete, fields beyond this
4178
// point will have to move off the page. For any fields
4179
// that are not being updated we have to save their
4180
// values from this page to insert into an overflow
4181
// portion.
4182
//
4183
// When the complete row is being updated there is no
4184
// need to save any fields so just return.
4185
if (validColumns != null)
4186                        {
4187                            handleIncompleteLogRow(
4188                                slot, nextColumn, validColumns, out);
4189                        }
4190                    }
4191
4192                    return (nextColumn);
4193                }
4194                
4195                columnFlag = COLUMN_NONE;
4196            }
4197
4198            out.setBeginPosition(beginPosition);
4199            startColumn = -1;
4200
4201            if ((calcMinimumRecordSize) &&
4202                (spaceAvailable < (minimumRecordSize - userRowSize)))
4203            {
4204                throw new NoSpaceOnPage(isOverflowPage());
4205            }
4206
4207        }
4208        finally
4209        {
4210            resetOutputStream();
4211        }
4212
4213        return (startColumn);
4214    }
4215
4216    /**
4217     * Handle an update of a record portion that is incomplete.
4218     * <p>
4219     * Handle an update of a record portion that is incomplete.
4220     * Ie. Columns have expanded that require other columns to move
4221     * off the page into a new portion.
4222     * <P>
4223     * This method works out of the columns that need to be moved which are not
4224     * being updated and makes a copy of their data. It then throws an
4225     * exception with this data, much like the long column exception which will
4226     * then allow the original insert to complete.
4227     * <P>
4228     * If no columns need to be saved (ie all the ones that would move are
4229     * being updated) then no exception is thrown, logRow() will return and the
4230     * update completes normally.
4231     * <p>
4232     *
4233     * @param slot slot of the current update.
4234     * @param startColumn column to start at, handles start in middle of row
4235     * @param columnList bit map indicating which columns are being updated.
4236     * @param out place to lot to.
4237     *
4238     * @exception StandardException Standard exception policy.
4239     **/

4240    private void handleIncompleteLogRow(
4241    int slot,
4242    int startColumn,
4243    FormatableBitSet columnList,
4244    DynamicByteArrayOutputStream out)
4245        throws StandardException
4246    {
4247        if (SanityManager.DEBUG)
4248            SanityManager.ASSERT(columnList != null);
4249
4250        StoredRecordHeader rh = getHeaderAtSlot(slot);
4251
4252        int endFieldExclusive = rh.getFirstField() + rh.getNumberFields();
4253
4254        // first see if any fields are not being modified
4255
boolean needSave = false;
4256        int columnListSize = columnList.size();
4257        for (int i = startColumn; i < endFieldExclusive; i++)
4258        {
4259            if (!(columnListSize > i && columnList.get(i)))
4260            {
4261                needSave = true;
4262                break;
4263            }
4264        }
4265        if (!needSave)
4266            return;
4267
4268        Object JavaDoc[] savedFields =
4269            new Object JavaDoc[endFieldExclusive - startColumn];
4270
4271        ByteArrayOutputStream fieldStream = null;
4272
4273        for (int i = startColumn; i < endFieldExclusive; i++)
4274        {
4275            // row is being updated - ignore
4276
if (columnListSize > i && columnList.get(i))
4277                continue;
4278
4279            // save the data
4280

4281            try
4282            {
4283                // use the old value - we use logField to ensure that we
4284
// get the raw contents of the field and don't follow
4285
// any long columns. In addition we save this as a RawField
4286
// so that we preserve the state of the field header.
4287
if (fieldStream == null)
4288                    fieldStream = new ByteArrayOutputStream();
4289                else
4290                    fieldStream.reset();
4291
4292                logField(slot, i, fieldStream);
4293
4294                savedFields[i - startColumn] =
4295                    new RawField(fieldStream.toByteArray());
4296
4297            }
4298            catch (IOException JavaDoc ioe)
4299            {
4300                throw dataFactory.markCorrupt(
4301                    StandardException.newException(
4302                        SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
4303            }
4304        }
4305
4306        // Use a long column exception to notify the caller of the need
4307
// to perform an insert of the columns that need to move
4308

4309        LongColumnException lce = new LongColumnException();
4310        lce.setExceptionInfo(
4311            out, startColumn, -1 /* indicates not actual long column */);
4312        lce.setColumn(savedFields);
4313
4314        throw lce;
4315    }
4316
4317    /**
4318
4319        @param row (IN/OUT) the row that is to be restored (sparse representation)
4320        @param limitInput the limit input stream
4321        @param objectInput the object input stream
4322
4323        @exception StandardException Standard Cloudscape error policy
4324        @exception IOException I/O exception in reading meta data.
4325    */

4326
4327    /**
4328     * Restore a storable row from a LimitInputStream.
4329     * <p>
4330     * Restore a storable row from an LimitInputStream - user must supply two
4331     * streams on top of the same data, one implements ObjectInput interface
4332     * that knows how to restore the object, the other one implements
4333     * LimitInputStream.
4334     * <p>
4335     * @param in the limit input stream
4336     * @param row (IN/OUT) row that is to be restored
4337     * (sparse representation)
4338     *
4339     * @exception StandardException Standard exception policy.
4340     **/

4341    public void restoreRecordFromStream(
4342    LimitObjectInput in,
4343    Object JavaDoc[] row)
4344        throws StandardException, IOException JavaDoc
4345    {
4346
4347        StoredRecordHeader recordHeader = new StoredRecordHeader();
4348        recordHeader.read(in);
4349        readRecordFromStream(
4350            row,
4351            row.length - 1,
4352            (int[]) null,
4353            (int[]) null,
4354            in,
4355            recordHeader,
4356            (ErrorObjectInput) null /* always null */, null);
4357    }
4358
4359    /**
4360     * Process the qualifier list on the row, return true if it qualifies.
4361     * <p>
4362     * A two dimensional array is to be used to pass around a AND's and OR's in
4363     * conjunctive normal form. The top slot of the 2 dimensional array is
4364     * optimized for the more frequent where no OR's are present. The first
4365     * array slot is always a list of AND's to be treated as described above
4366     * for single dimensional AND qualifier arrays. The subsequent slots are
4367     * to be treated as AND'd arrays or OR's. Thus the 2 dimensional array
4368     * qual[][] argument is to be treated as the following, note if
4369     * qual.length = 1 then only the first array is valid and it is and an
4370     * array of and clauses:
4371     *
4372     * (qual[0][0] and qual[0][0] ... and qual[0][qual[0].length - 1])
4373     * and
4374     * (qual[1][0] or qual[1][1] ... or qual[1][qual[1].length - 1])
4375     * and
4376     * (qual[2][0] or qual[2][1] ... or qual[2][qual[2].length - 1])
4377     * ...
4378     * and
4379     * (qual[qual.length - 1][0] or qual[1][1] ... or qual[1][2])
4380     *
4381     *
4382     * @return true if the row qualifies.
4383     *
4384     * @param row The row being qualified.
4385     * @param qual_list 2 dimensional array representing conjunctive
4386     * normal form of simple qualifiers.
4387     *
4388     * @exception StandardException Standard exception policy.
4389     **/

4390    private boolean qualifyRecordFromRow(
4391    Object JavaDoc[] row,
4392    Qualifier[][] qual_list)
4393         throws StandardException
4394    {
4395        boolean row_qualifies = true;
4396
4397        if (SanityManager.DEBUG)
4398        {
4399            SanityManager.ASSERT(row != null);
4400        }
4401
4402        // First do the qual[0] which is an array of qualifer terms.
4403

4404        if (SanityManager.DEBUG)
4405        {
4406            // routine should not be called if there is no qualifier
4407
SanityManager.ASSERT(qual_list != null);
4408            SanityManager.ASSERT(qual_list.length > 0);
4409        }
4410
4411        for (int i = 0; i < qual_list[0].length; i++)
4412        {
4413            // process each AND clause
4414

4415            row_qualifies = false;
4416
4417            // process each OR clause.
4418

4419            Qualifier q = qual_list[0][i];
4420
4421            // Get the column from the possibly partial row, of the
4422
// q.getColumnId()'th column in the full row.
4423
DataValueDescriptor columnValue =
4424                    (DataValueDescriptor) row[q.getColumnId()];
4425
4426            row_qualifies =
4427                columnValue.compare(
4428                    q.getOperator(),
4429                    q.getOrderable(),
4430                    q.getOrderedNulls(),
4431                    q.getUnknownRV());
4432
4433            if (q.negateCompareResult())
4434                row_qualifies = !row_qualifies;
4435
4436            // Once an AND fails the whole Qualification fails - do a return!
4437
if (!row_qualifies)
4438                return(false);
4439        }
4440
4441        // all the qual[0] and terms passed, now process the OR clauses
4442

4443        for (int and_idx = 1; and_idx < qual_list.length; and_idx++)
4444        {
4445            // loop through each of the "and" clause.
4446

4447            row_qualifies = false;
4448
4449            if (SanityManager.DEBUG)
4450            {
4451                // Each OR clause must be non-empty.
4452
SanityManager.ASSERT(qual_list[and_idx].length > 0);
4453            }
4454
4455            for (int or_idx = 0; or_idx < qual_list[and_idx].length; or_idx++)
4456            {
4457                // Apply one qualifier to the row.
4458
Qualifier q = qual_list[and_idx][or_idx];
4459                int col_id = q.getColumnId();
4460
4461                if (SanityManager.DEBUG)
4462                {
4463                    SanityManager.ASSERT(
4464                        (col_id < row.length),
4465                        "Qualifier is referencing a column not in the row.");
4466                }
4467
4468                // Get the column from the possibly partial row, of the
4469
// q.getColumnId()'th column in the full row.
4470
DataValueDescriptor columnValue =
4471                    (DataValueDescriptor) row[q.getColumnId()];
4472
4473                if (SanityManager.DEBUG)
4474                {
4475                    if (columnValue == null)
4476                        SanityManager.THROWASSERT(
4477                            "1:row = " + RowUtil.toString(row) +
4478                            "row.length = " + row.length +
4479                            ";q.getColumnId() = " + q.getColumnId());
4480                }
4481
4482                // do the compare between the column value and value in the
4483
// qualifier.
4484
row_qualifies =
4485                    columnValue.compare(
4486                            q.getOperator(),
4487                            q.getOrderable(),
4488                            q.getOrderedNulls(),
4489                            q.getUnknownRV());
4490
4491                if (q.negateCompareResult())
4492                    row_qualifies = !row_qualifies;
4493
4494                // SanityManager.DEBUG_PRINT("StoredPage.qual", "processing qual[" + and_idx + "][" + or_idx + "] = " + qual_list[and_idx][or_idx] );
4495

4496                // SanityManager.DEBUG_PRINT("StoredPage.qual", "value = " + row_qualifies);
4497

4498                // processing "OR" clauses, so as soon as one is true, break
4499
// to go and process next AND clause.
4500
if (row_qualifies)
4501                    break;
4502
4503            }
4504
4505            // The qualifier list represented a set of "AND'd"
4506
// qualifications so as soon as one is false processing is done.
4507
if (!row_qualifies)
4508                break;
4509        }
4510
4511        return(row_qualifies);
4512    }
4513
    /**
     * Read just one column from stream into row.
     * <p>
     * The routine reads just one column from the row; it is mostly code
     * taken from readRecordFromStream, but highly optimized to just get
     * one column from a non-overflow row.  It can only be called to read
     * a row from the pageData array as it directly accesses the page array
     * to avoid the Stream overhead while processing non-user data which
     * does not need the limit functionality.
     * <p>
     * It is expected that this code will be called to read in a column
     * associated with qualifiers which are applied one column at a time,
     * and has been specialized to provide the greatest performance for
     * processing qualifiers.  This kind of access is done when scanning
     * large datasets while applying qualifiers and thus any performance
     * gain at this low level is multiplied by the large number of rows that
     * may be iterated over.
     * <p>
     * The column is read into the object located in row[colid].
     *
     * @param row                  col is read into object in row[colid].
     * @param colid                the column id to read, colid N is row[N].
     * @param offset_to_field_data offset in bytes from top of page to the
     *                             first field of the record.
     * @param recordHeader         record header of row to read column from.
     * @param recordToLock         record handle to lock,
     *                             used by overflow column code.
     *
     * @exception StandardException Standard exception policy.
     **/
    private final void readOneColumnFromPage(
    Object[] row,
    int colid,
    int offset_to_field_data,
    StoredRecordHeader recordHeader,
    RecordHandle recordToLock)
         throws StandardException, IOException
    {
        // Set while control is inside user restore code (readExternal /
        // readObject); the catch blocks use it to distinguish user-code
        // failures from our own I/O errors.
        ErrorObjectInput inUserCode = null;

        // Reads in this routine are always against the raw data in the
        // pageData array, thus it can assume array access to page data array.
        ArrayInputStream lrdi = rawDataIn;

        try
        {
            if (SanityManager.DEBUG)
            {
                if (colid >= row.length)
                    SanityManager.THROWASSERT(
                        "colid = " + colid +
                        ";row length = " + row.length);

                // currently this routine will not work on long rows.
                if (recordHeader.getFirstField() != 0)
                {
                    SanityManager.THROWASSERT(
                        "recordHeader.getFirstField() = " +
                        recordHeader.getFirstField());
                }
            }

            Object column = row[colid];

            // Does the column id exist on this page?
            if (colid <= (recordHeader.getNumberFields() - 1))
            {
                // Skip over the fields before colid; the column in question
                // exists on this page.  Each iteration advances past one
                // complete stored field (header + data).
                for (int columnId = colid; columnId > 0; columnId--)
                {
                    offset_to_field_data +=
                        StoredFieldHeader.readTotalFieldLength(
                            pageData, offset_to_field_data);
                }

                // Read the field header: first the status byte ...
                int fieldStatus =
                    StoredFieldHeader.readStatus(
                        pageData, offset_to_field_data);

                // ... then the field data length, positioning lrdi on the
                // first byte of the field data.
                int fieldDataLength =
                    StoredFieldHeader.readFieldLengthAndSetStreamPosition(
                        pageData,
                        offset_to_field_data +
                            StoredFieldHeader.STORED_FIELD_HEADER_STATUS_SIZE,
                        fieldStatus,
                        slotFieldSize,
                        lrdi);

                if (SanityManager.DEBUG)
                {
                    SanityManager.ASSERT(
                        !StoredFieldHeader.isExtensible(fieldStatus),
                        "extensible fields not supported yet");
                }

                // SRW-DJD code assumes non-extensible case ...

                if (!StoredFieldHeader.isNonexistent(fieldStatus))
                {
                    boolean isOverflow =
                        StoredFieldHeader.isOverflow(fieldStatus);

                    OverflowInputStream overflowIn = null;

                    if (isOverflow)
                    {
                        // A fetched long column is returned as a stream.
                        // The field data on this page is the compressed
                        // (page, id) pointer to the overflow chain.
                        long overflowPage =
                            CompressedNumber.readLong((InputStream) lrdi);

                        int overflowId =
                            CompressedNumber.readInt((InputStream) lrdi);

                        // Prepare the stream for results - create the
                        // byteHolder the size of a page, so that it will fit
                        // the field data that would fit on a page.
                        MemByteHolder byteHolder =
                            new MemByteHolder(pageData.length);

                        overflowIn = new OverflowInputStream(
                            byteHolder, owner, overflowPage,
                            overflowId, recordToLock);
                    }

                    // Deal with Storable columns.
                    if (column instanceof DataValueDescriptor)
                    {
                        DataValueDescriptor sColumn =
                            (DataValueDescriptor) column;

                        // Is the column null?
                        if (StoredFieldHeader.isNull(fieldStatus))
                        {
                            sColumn.restoreToNull();
                        }
                        else
                        {
                            // Set the limit for the user read so the
                            // column's restore code cannot read past its
                            // own field data.
                            if (!isOverflow)
                            {
                                // normal, non-overflow column case.

                                lrdi.setLimit(fieldDataLength);
                                inUserCode = lrdi;
                                sColumn.readExternalFromArray(lrdi);
                                inUserCode = null;
                                // Skip any bytes the column left unread so
                                // the stream ends positioned past the field.
                                int unread = lrdi.clearLimit();
                                if (unread != 0)
                                    lrdi.skipBytes(unread);
                            }
                            else
                            {
                                // fetched column is a Storable long column.

                                FormatIdInputStream newIn =
                                    new FormatIdInputStream(overflowIn);

                                // StreamStorable columns take the stream
                                // itself and materialize lazily; others are
                                // restored eagerly here.
                                if ((sColumn instanceof StreamStorable))
                                {
                                    ((StreamStorable)sColumn).setStream(newIn);
                                }
                                else
                                {
                                    inUserCode = newIn;
                                    sColumn.readExternal(newIn);
                                    inUserCode = null;
                                }
                            }
                        }
                    }
                    else
                    {
                        // At this point only non-Storable columns.

                        if (StoredFieldHeader.isNull(fieldStatus))
                        {
                            // Only Storables can be null ...

                            throw StandardException.newException(
                                    SQLState.DATA_NULL_STORABLE_COLUMN,
                                    Integer.toString(colid));
                        }

                        // This is a non-extensible field, which means the
                        // caller must know the correct type and thus the
                        // element in row is the correct type or null.  It
                        // must be Serializable.
                        //
                        // We do not support Externalizable here.

                        lrdi.setLimit(fieldDataLength);
                        inUserCode = lrdi;
                        // RESOLVE (no non-storables?)
                        row[colid] = (Object) lrdi.readObject();
                        inUserCode = null;
                        int unread = lrdi.clearLimit();
                        if (unread != 0)
                            lrdi.skipBytes(unread);
                    }

                }
                else
                {
                    // Field is marked non-existent in its header -
                    // return the column as null.

                    if (column instanceof DataValueDescriptor)
                    {
                        // RESOLVE - This is in place for 1.2.  In the future
                        // we may want to return this column as non-existent
                        // even if it is a storable column, or maybe use a
                        // supplied default.

                        ((DataValueDescriptor) column).restoreToNull();
                    }
                    else
                    {
                        row[colid] = null;
                    }
                }
            }
            else
            {
                // Field does not exist on this page (colid is beyond the
                // number of fields stored) - return the column as null.

                if (column instanceof DataValueDescriptor)
                {
                    // RESOLVE - This is in place for 1.2.  In the future
                    // we may want to return this column as non-existent
                    // even if it is a storable column, or maybe use a
                    // supplied default.
                    ((DataValueDescriptor) column).restoreToNull();
                }
                else
                {
                    row[colid] = null;
                }
            }
        }
        catch (IOException ioe)
        {
            // An exception during the restore of a user column; this doesn't
            // make the database corrupt, just that this field is inaccessible.

            if (inUserCode != null)
            {
                lrdi.clearLimit();

                if (ioe instanceof EOFException)
                {
                    if (SanityManager.DEBUG)
                    {
                        SanityManager.DEBUG_PRINT("DEBUG_TRACE",
                            "StoredPage.readOneColumnFromPage - EOF while restoring record: " +
                                recordHeader +
                            "Page dump = " + this);
                        SanityManager.showTrace(ioe);
                    }

                    // Going beyond the limit in a DataInput class results in
                    // an EOFException when it sees the -1 from a read.
                    throw StandardException.newException(
                            SQLState.DATA_STORABLE_READ_MISMATCH,
                            ioe, inUserCode.getErrorInfo());
                }

                // Some SQLData error reporting: surface the user code's
                // nested exception with a more specific SQLState if we
                // recognize it.
                Exception ne = inUserCode.getNestedException();
                if (ne != null)
                {
                    if (ne instanceof InstantiationException)
                    {
                        throw StandardException.newException(
                            SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION,
                            ne, inUserCode.getErrorInfo());
                    }

                    if (ne instanceof IllegalAccessException)
                    {
                        throw StandardException.newException(
                            SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION,
                            ne, inUserCode.getErrorInfo());
                    }

                    if (ne instanceof StandardException)
                    {
                        throw (StandardException) ne;
                    }
                }

                throw StandardException.newException(
                        SQLState.DATA_STORABLE_READ_EXCEPTION,
                        ioe, inUserCode.getErrorInfo());
            }

            // Not user code - re-throw to higher levels so they can put it
            // in the correct context.
            throw ioe;

        }
        catch (ClassNotFoundException cnfe)
        {
            lrdi.clearLimit();

            // An exception during the restore of a user column; this doesn't
            // make the database corrupt, just that this field is inaccessible.
            //
            // NOTE(review): unlike the IOException/LinkageError handlers,
            // inUserCode is dereferenced here without a null check.  It
            // appears a CNFE can only surface from readObject/readExternal,
            // where inUserCode is always set - confirm before relying on it.
            throw StandardException.newException(
                    SQLState.DATA_STORABLE_READ_MISSING_CLASS,
                    cnfe, inUserCode.getErrorInfo());

        }
        catch (LinkageError le)
        {
            // Some error during the link of a user class.
            if (inUserCode != null)
            {
                lrdi.clearLimit();

                throw StandardException.newException(
                        SQLState.DATA_STORABLE_READ_EXCEPTION,
                        le, inUserCode.getErrorInfo());
            }
            throw le;
        }

    }
4847
4848
4849
4850    /**
4851     * Process the list of qualifiers on the row in the stream.
4852     * <p>
4853     * The rawDataIn stream is expected to be positioned after the record
4854     * header. The inUserCode parameter here is only to get around a
4855     * JDK 1.1.x (at least 1.1.7) JIT bug. If inUserCode was a local variable
4856     * then it is not correctly set on an exception, the only time we care
4857     * about its value. It seems to work when its a parameter. Null should
4858     * always be passed in. This bug is fixed in the JDK 1.2 JIT.
4859     * <p>
4860     * Check all qualifiers in the qualifier array against row. Return true
4861     * if all compares specified by the qualifier array return true, else
4862     * return false.
4863     * <p>
4864     * This routine assumes client caller has already checked if the row
4865     * is deleted or not. The row that it get's is expected to match
4866     * the partial column list of the scan.
4867     * <p>
4868     * On entering this routine the stream should be positioned to the
4869     * beginning of the row data, just after the row header. On exit
4870     * the stream will also be positioned there.
4871     *
4872     * A two dimensional array is to be used to pass around a AND's and OR's in
4873     * conjunctive normal form. The top slot of the 2 dimensional array is
4874     * optimized for the more frequent where no OR's are present. The first
4875     * array slot is always a list of AND's to be treated as described above
4876     * for single dimensional AND qualifier arrays. The subsequent slots are
4877     * to be treated as AND'd arrays or OR's. Thus the 2 dimensional array
4878     * qual[][] argument is to be treated as the following, note if
     * qual.length = 1 then only the first array is valid and it is an
     * array of and clauses:
4881     *
     * (qual[0][0] and qual[0][1] ... and qual[0][qual[0].length - 1])
4883     * and
4884     * (qual[1][0] or qual[1][1] ... or qual[1][qual[1].length - 1])
4885     * and
4886     * (qual[2][0] or qual[2][1] ... or qual[2][qual[2].length - 1])
4887     * ...
4888     * and
     * (qual[qual.length - 1][0] or ... or
     *  qual[qual.length - 1][qual[qual.length - 1].length - 1])
4890     *
4891     * @return Whether or not the row input qualifies.
4892     *
4893     * @param row restore row into this object array.
4894     * @param offset_to_row_data offset in bytes from top of page to row
4895     * @param fetchDesc Description of fetch including which cols
4896     * and qualifiers.
4897     * @param recordHeader The record header of the row, it was read
4898     * in from stream and dataIn is positioned
4899     * after it.
4900     * @param recordToLock The head row to use for locking, used to
4901     * lock head row of overflow columns/rows.
4902     *
4903     * @exception StandardException Standard exception policy.
4904     **/

4905    private final boolean qualifyRecordFromSlot(
4906    Object JavaDoc[] row,
4907    int offset_to_row_data,
4908    FetchDescriptor fetchDesc,
4909    StoredRecordHeader recordHeader,
4910    RecordHandle recordToLock)
4911         throws StandardException, IOException JavaDoc
4912    {
4913        boolean row_qualifies = true;
4914        Qualifier[][] qual_list = fetchDesc.getQualifierList();
4915        int[] materializedCols = fetchDesc.getMaterializedColumns();
4916
4917        if (SanityManager.DEBUG)
4918        {
4919            SanityManager.ASSERT(qual_list != null, "Not coded yet!");
4920        }
4921
4922        if (SanityManager.DEBUG)
4923        {
4924            SanityManager.ASSERT(row != null);
4925        }
4926
4927        // First process the initial list of AND's in the 1st array
4928

4929        for (int i = 0; i < qual_list[0].length; i++)
4930        {
4931            // process each AND clause
4932

4933            row_qualifies = false;
4934
4935            // Apply one qualifier to the row.
4936
Qualifier q = qual_list[0][i];
4937            int col_id = q.getColumnId();
4938
4939            if (SanityManager.DEBUG)
4940            {
4941                SanityManager.ASSERT(
4942                    (col_id < row.length),
4943                    "Qualifier is referencing a column not in the row.");
4944            }
4945
4946            // materialize the column object if we haven't done it yet.
4947
if (materializedCols[col_id] == 0)
4948            {
4949                // materialize just this column from the row, no qualifiers
4950
readOneColumnFromPage(
4951                    row,
4952                    col_id,
4953                    offset_to_row_data,
4954                    recordHeader,
4955                    recordToLock);
4956
4957                // mark offset, indicating the row has been read in.
4958
//
4959
// RESOLVE (mikem) - right now value of entry is useless, it
4960
// is an int so that in the future we could cache the offset
4961
// to fields to improve performance of getting to a column
4962
// after qualifying.
4963
materializedCols[col_id] = offset_to_row_data;
4964            }
4965
4966            // Get the column from the possibly partial row, of the
4967
// q.getColumnId()'th column in the full row.
4968

4969            if (SanityManager.DEBUG)
4970            {
4971                if (row[col_id] == null)
4972                    SanityManager.THROWASSERT(
4973                        "1:row = " + RowUtil.toString(row) +
4974                        "row.length = " + row.length +
4975                        ";q.getColumnId() = " + q.getColumnId());
4976            }
4977
4978            // do the compare between the column value and value in the
4979
// qualifier.
4980
row_qualifies =
4981                ((DataValueDescriptor) row[col_id]).compare(
4982                        q.getOperator(),
4983                        q.getOrderable(),
4984                        q.getOrderedNulls(),
4985                        q.getUnknownRV());
4986
4987            if (q.negateCompareResult())
4988                row_qualifies = !row_qualifies;
4989
4990            // Once an AND fails the whole Qualification fails - do a return!
4991
if (!row_qualifies)
4992                return(false);
4993        }
4994
4995        // Now process the Subsequent OR clause's, beginning with qual_list[1]
4996

4997        for (int and_idx = 1; and_idx < qual_list.length; and_idx++)
4998        {
4999            // loop through each of the "and" clause.
5000

5001            row_qualifies = false;
5002
5003            for (int or_idx = 0; or_idx < qual_list[and_idx].length; or_idx++)
5004            {
5005                // Apply one qualifier to the row.
5006
Qualifier q = qual_list[and_idx][or_idx];
5007                int col_id = q.getColumnId();
5008
5009                if (SanityManager.DEBUG)
5010                {
5011                    SanityManager.ASSERT(
5012                        (col_id < row.length),
5013                        "Qualifier is referencing a column not in the row.");
5014                }
5015
5016                // materialize the column object if we haven't done it yet.
5017
if (materializedCols[col_id] == 0)
5018                {
5019                    // materialize just this column from the row, no qualifiers
5020
readOneColumnFromPage(
5021                        row,
5022                        col_id,
5023                        offset_to_row_data,
5024                        recordHeader,
5025                        recordToLock);
5026
5027                    // mark offset, indicating the row has been read in.
5028
//
5029
// RESOLVE (mikem) - right now value of entry is useless, it
5030
// is an int so that in the future we could cache the offset
5031
// to fields to improve performance of getting to a column
5032
// after qualifying.
5033
materializedCols[col_id] = offset_to_row_data;
5034                }
5035
5036                // Get the column from the possibly partial row, of the
5037
// q.getColumnId()'th column in the full row.
5038

5039                if (SanityManager.DEBUG)
5040                {
5041                    if (row[col_id] == null)
5042                        SanityManager.THROWASSERT(
5043                            "1:row = " + RowUtil.toString(row) +
5044                            "row.length = " + row.length +
5045                            ";q.getColumnId() = " + q.getColumnId());
5046                }
5047
5048                // do the compare between the column value and value in the
5049
// qualifier.
5050
row_qualifies =
5051                    ((DataValueDescriptor) row[col_id]).compare(
5052                            q.getOperator(),
5053                            q.getOrderable(),
5054                            q.getOrderedNulls(),
5055                            q.getUnknownRV());
5056
5057
5058                if (q.negateCompareResult())
5059                    row_qualifies = !row_qualifies;
5060
5061                // SanityManager.DEBUG_PRINT("StoredPage.qual", "processing qual[" + and_idx + "][" + or_idx + "] = " + qual_list[and_idx][or_idx] );
5062

5063                // SanityManager.DEBUG_PRINT("StoredPage.qual", "value = " + row_qualifies);
5064

5065                // processing "OR" clauses, so as soon as one is true, break
5066
// to go and process next AND clause.
5067
if (row_qualifies)
5068                    break;
5069
5070            }
5071
5072            // The qualifier list represented a set of "AND'd"
5073
// qualifications so as soon as one is false processing is done.
5074
if (!row_qualifies)
5075                break;
5076        }
5077
5078        return(row_qualifies);
5079    }
5080
5081    /**
5082     * restore a record from a stream.
5083     * <p>
5084     * The rawDataIn stream is expected to be positioned after the record
5085     * header. The inUserCode parameter here is only to get around a
5086     * JDK 1.1.x (at least 1.1.7) JIT bug. If inUserCode was a local variable
5087     * then it is not correctly set on an exception, the only time we care
5088     * about its value. It seems to work when its a parameter. Null should
5089     * always be passed in. This bug is fixed in the JDK 1.2 JIT.
5090     * <p>
5091     *
5092     * @return The identifier to be used to open the conglomerate later.
5093     *
5094     * @param row restore row into this object array.
5095     * @param max_colid The maximum numbered column id that will be
5096     * requested by caller. It should be:
5097     * min(row.length - 1, maximum bit set in vCols)
5098     * It is used to stop the inner most loop from
5099     * looking at more columns in the row.
5100     * @param vCols If not null, bit map indicates valid cols.
5101     * @param mCols If not null, int array indicates columns already
5102     * read in from the stream. A non-zero entry
5103     * means the column has already been read in.
5104     * @param dataIn restore row from this stream.
5105     * @param recordHeader The record header of the row, it was read in
5106     * from stream and dataIn is positioned after it.
5107     * @param inUserCode see comments above about jit bug.
5108     * @param recordToLock The head row to use for locking, used to lock
5109     * head row of overflow columns/rows.
5110     *
5111     * @exception StandardException Standard exception policy.
5112     **/

5113    private final boolean readRecordFromStream(
5114    Object JavaDoc[] row,
5115    int max_colid,
5116    int[] vCols,
5117    int[] mCols,
5118    LimitObjectInput dataIn,
5119    StoredRecordHeader recordHeader,
5120    ErrorObjectInput inUserCode,
5121    RecordHandle recordToLock)
5122         throws StandardException, IOException JavaDoc
5123    {
5124        try
5125        {
5126            // Get the number of columns in the row.
5127
int numberFields = recordHeader.getNumberFields();
5128
5129            int startColumn = recordHeader.getFirstField();
5130
5131            if (startColumn > max_colid)
5132            {
5133                // done if the startColumn is higher than highest column.
5134
return true;
5135            }
5136
5137            // For each column in the row, restore the column from
5138
// the corresponding field in the record. If the field
5139
// is missing or not set, set the column to null.
5140

5141            int highestColumnOnPage = numberFields + startColumn;
5142
5143            int vColsSize = (vCols == null ) ? 0 : vCols.length;
5144
5145            for (int columnId = startColumn; columnId <= max_colid; columnId++)
5146            {
5147                // skip any "existing" columns not requested, or requested cols
5148
// that have already been read.
5149
if (((vCols != null) &&
5150                     (!(vColsSize > columnId && (vCols[columnId] != 0)))) ||
5151                    ((mCols != null) && (mCols[columnId] != 0)))
5152                {
5153                    if (columnId < highestColumnOnPage)
5154                    {
5155                        // If the field exists in the row on the page, but the
5156
// partial row being returned does not include it,
5157
// skip the field ...
5158

5159                        skipField(dataIn);
5160                    }
5161
5162                    continue;
5163                }
5164
5165                // See if the column identifier is beyond the number of fields
5166
// that this record has
5167
if (columnId >= highestColumnOnPage)
5168                {
5169                    // field is non-existent
5170
Object JavaDoc column = row[columnId];
5171
5172                    if (column instanceof DataValueDescriptor)
5173                    {
5174                        // RESOLVE - This is in place for 1.2. In the future
5175
// we may want to return this column as non-existent
5176
// even if it is a storable column, or maybe use a
5177
// supplied default.
5178

5179                        ((DataValueDescriptor) column).restoreToNull();
5180                    }
5181                    else
5182                    {
5183                        row[columnId] = null;
5184                    }
5185                    continue;
5186                }
5187
5188                // read the field header
5189
int fieldStatus =
5190                    StoredFieldHeader.readStatus(dataIn);
5191
5192                int fieldDataLength =
5193                    StoredFieldHeader.readFieldDataLength(
5194                        dataIn, fieldStatus, slotFieldSize);
5195
5196                if (SanityManager.DEBUG)
5197                {
5198                    SanityManager.ASSERT(
5199                        !StoredFieldHeader.isExtensible(fieldStatus),
5200                        "extensible fields not supported yet");
5201                }
5202
5203                Object JavaDoc column = row[columnId];
5204
5205                OverflowInputStream overflowIn = null;
5206
5207                // SRW-DJD code assumes non-extensible case ...
5208

5209                // field is non-existent, return null
5210
if (StoredFieldHeader.isNonexistent(fieldStatus))
5211                {
5212
5213                    if (column instanceof DataValueDescriptor)
5214                    {
5215                        // RESOLVE - This is in place for 1.2. In the future
5216
// we may want to return this column as non-existent
5217
// even if it is a storable column, or maybe use a
5218
// supplied default.
5219
((DataValueDescriptor) column).restoreToNull();
5220                    }
5221                    else
5222                    {
5223                        row[columnId] = null;
5224                    }
5225                    continue;
5226                }
5227
5228                boolean isOverflow = StoredFieldHeader.isOverflow(fieldStatus);
5229
5230                if (isOverflow)
5231                {
5232
5233                    // A fetched long column needs to be returned as a stream
5234
//
5235
long overflowPage =
5236                        CompressedNumber.readLong((InputStream) dataIn);
5237
5238                    int overflowId =
5239                        CompressedNumber.readInt((InputStream) dataIn);
5240
5241                    // Prepare the stream for results...
5242
// create the byteHolder the size of a page, so, that it
5243
// will fit the field Data that would fit on a page.
5244
MemByteHolder byteHolder =
5245                        new MemByteHolder(pageData.length);
5246
5247                    overflowIn = new OverflowInputStream(
5248                        byteHolder, owner, overflowPage,
5249                        overflowId, recordToLock);
5250                }
5251
5252                // Deal with Object columns
5253
if (column instanceof DataValueDescriptor)
5254                {
5255                    DataValueDescriptor sColumn = (DataValueDescriptor) column;
5256
5257                    // is the column null ?
5258
if (StoredFieldHeader.isNull(fieldStatus))
5259                    {
5260                        sColumn.restoreToNull();
5261                        continue;
5262                    }
5263
5264                    // set the limit for the user read
5265
if (!isOverflow)
5266                    {
5267                        // normal, non-overflow column case.
5268

5269                        dataIn.setLimit(fieldDataLength);
5270                        inUserCode = dataIn;
5271                        sColumn.readExternal(dataIn);
5272                        inUserCode = null;
5273                        int unread = dataIn.clearLimit();
5274                        if (unread != 0)
5275                            dataIn.skipBytes(unread);
5276                    }
5277                    else
5278                    {
5279                        // column being fetched is a Object long column.
5280

5281                        FormatIdInputStream newIn =
5282                            new FormatIdInputStream(overflowIn);
5283
5284                        // if a column is a long column, store recommends user
5285
// fetch it as a stream.
5286
boolean fetchStream = true;
5287
5288                        if (!(sColumn instanceof StreamStorable))
5289                        {
5290                            fetchStream = false;
5291                        }
5292
5293                        if (fetchStream)
5294                        {
5295                            ((StreamStorable)sColumn).setStream(newIn);
5296                        }
5297                        else
5298                        {
5299                            inUserCode = newIn;
5300                            sColumn.readExternal(newIn);
5301                            inUserCode = null;
5302                        }
5303
5304                    }
5305
5306                    continue;
5307                }
5308
5309                // At this point only non-Storable columns.
5310

5311                if (StoredFieldHeader.isNull(fieldStatus))
5312                {
5313                    // Only Storables can be null ...
5314

5315                    throw StandardException.newException(
5316                            SQLState.DATA_NULL_STORABLE_COLUMN,
5317                            Integer.toString(columnId));
5318                }
5319
5320                // This is a non-extensible field, which means the caller must
5321
// know the correct type and thus the element in row is the
5322
// correct type or null. It must be Serializable.
5323
//
5324
// We do not support Externalizable here.
5325

5326                dataIn.setLimit(fieldDataLength);
5327                inUserCode = dataIn;
5328                row[columnId] = (Object JavaDoc) dataIn.readObject();
5329                inUserCode = null;
5330                int unread = dataIn.clearLimit();
5331                if (unread != 0)
5332                    dataIn.skipBytes(unread);
5333
5334                continue;
5335            }
5336
5337            // if the last column on this page is bigger than the highest
5338
// column we are looking for, then we are done restoring the record.
5339

5340            if ((numberFields + startColumn) > max_colid)
5341                return true;
5342            else
5343                return false;
5344
5345        }
5346        catch (IOException JavaDoc ioe)
5347        {
5348            // an exception during the restore of a user column, this doesn't
5349
// make the database corrupt, just that this field is inaccessable
5350

5351            if (inUserCode != null)
5352            {
5353                dataIn.clearLimit();
5354
5355                if (ioe instanceof EOFException JavaDoc)
5356                {
5357                    if (SanityManager.DEBUG)
5358                    {
5359                        SanityManager.DEBUG_PRINT("DEBUG_TRACE",
5360                            "StoredPage - EOF while restoring record: " +
5361                                recordHeader +
5362                            "Page dump = " + this);
5363                    }
5364
5365                    // going beyond the limit in a DataInput class results in
5366
// an EOFException when it sees the -1 from a read
5367
throw StandardException.newException(
5368                            SQLState.DATA_STORABLE_READ_MISMATCH,
5369                            ioe, inUserCode.getErrorInfo());
5370                }
5371
5372                // some SQLData error reporting
5373
Exception JavaDoc ne = inUserCode.getNestedException();
5374                if (ne != null)
5375                {
5376                    if (ne instanceof InstantiationException JavaDoc)
5377                    {
5378                        throw StandardException.newException(
5379                            SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION,
5380                            ne, inUserCode.getErrorInfo());
5381                    }
5382
5383                    if (ne instanceof IllegalAccessException JavaDoc)
5384                    {
5385                        throw StandardException.newException(
5386                            SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION,
5387                            ne, inUserCode.getErrorInfo());
5388                    }
5389
5390                    if (ne instanceof StandardException)
5391                    {
5392                        throw (StandardException) ne;
5393                    }
5394                }
5395
5396                throw StandardException.newException(
5397                        SQLState.DATA_STORABLE_READ_EXCEPTION,
5398                        ioe, inUserCode.getErrorInfo());
5399            }
5400
5401            // re-throw to higher levels so they can put it in correct context.
5402
throw ioe;
5403
5404        }
5405        catch (ClassNotFoundException JavaDoc cnfe)
5406        {
5407            dataIn.clearLimit();
5408
5409            // an exception during the restore of a user column, this doesn't
5410
// make the database corrupt, just that this field is inaccessable
5411
throw StandardException.newException(
5412                    SQLState.DATA_STORABLE_READ_MISSING_CLASS,
5413                    cnfe, inUserCode.getErrorInfo());
5414
5415        }
5416        catch (LinkageError JavaDoc le)
5417        {
5418            // Some error during the link of a user class
5419
if (inUserCode != null)
5420            {
5421                dataIn.clearLimit();
5422
5423                throw StandardException.newException(
5424                        SQLState.DATA_STORABLE_READ_EXCEPTION,
5425                        le, inUserCode.getErrorInfo());
5426            }
5427            throw le;
5428        }
5429
5430    }
5431
5432    private final boolean readRecordFromArray(
5433    Object JavaDoc[] row,
5434    int max_colid,
5435    int[] vCols,
5436    int[] mCols,
5437    ArrayInputStream dataIn,
5438    StoredRecordHeader recordHeader,
5439    ErrorObjectInput inUserCode,
5440    RecordHandle recordToLock)
5441         throws StandardException, IOException JavaDoc
5442    {
5443        try
5444        {
5445            // Get the number of columns in the row.
5446
int numberFields = recordHeader.getNumberFields();
5447
5448            int startColumn = recordHeader.getFirstField();
5449
5450            if (startColumn > max_colid)
5451            {
5452                // done if the startColumn is higher than highest column.
5453
return true;
5454            }
5455
5456            // For each column in the row, restore the column from
5457
// the corresponding field in the record. If the field
5458
// is missing or not set, set the column to null.
5459

5460            int highestColumnOnPage = numberFields + startColumn;
5461
5462            int vColsSize = (vCols == null ) ? 0 : vCols.length;
5463
5464            int offset_to_field_data = dataIn.getPosition();
5465
5466            for (int columnId = startColumn; columnId <= max_colid; columnId++)
5467            {
5468                // skip any "existing" columns not requested, or requested cols
5469
// that have already been read.
5470
if (((vCols != null) &&
5471                     (!(vColsSize > columnId && (vCols[columnId] != 0)))) ||
5472                    ((mCols != null) && (mCols[columnId] != 0)))
5473                {
5474                    if (columnId < highestColumnOnPage)
5475                    {
5476                        // If the field exists in the row on the page, but the
5477
// partial row being returned does not include it,
5478
// skip the field ...
5479

5480                        offset_to_field_data +=
5481                            StoredFieldHeader.readTotalFieldLength(
5482                                pageData, offset_to_field_data);
5483                    }
5484
5485                    continue;
5486                }
5487                else if (columnId < highestColumnOnPage)
5488                {
5489                    // the column is on this page.
5490

5491                    // read the field header
5492

5493                    // read the status byte.
5494
int fieldStatus =
5495                        StoredFieldHeader.readStatus(
5496                            pageData, offset_to_field_data);
5497
5498                    // read the field data length, position on 1st byte of data
5499
int fieldDataLength =
5500                        StoredFieldHeader.readFieldLengthAndSetStreamPosition(
5501                            pageData,
5502                            offset_to_field_data +
5503                              StoredFieldHeader.STORED_FIELD_HEADER_STATUS_SIZE,
5504                            fieldStatus,
5505                            slotFieldSize,
5506                            dataIn);
5507
5508
5509                    if (SanityManager.DEBUG)
5510                    {
5511                        SanityManager.ASSERT(
5512                            !StoredFieldHeader.isExtensible(fieldStatus),
5513                            "extensible fields not supported yet");
5514                    }
5515
5516                    Object JavaDoc column = row[columnId];
5517
5518                    OverflowInputStream overflowIn = null;
5519
5520                    // SRW-DJD code assumes non-extensible case ...
5521

5522                    if ((fieldStatus & StoredFieldHeader.FIELD_NONEXISTENT) !=
5523                                            StoredFieldHeader.FIELD_NONEXISTENT)
5524                    {
5525                        // normal path - field exists.
5526

5527                        boolean isOverflow =
5528                            ((fieldStatus &
5529                                  StoredFieldHeader.FIELD_OVERFLOW) != 0);
5530
5531                        if (isOverflow)
5532                        {
5533
5534                            // A fetched long column is returned as a stream
5535

5536                            long overflowPage =
5537                                CompressedNumber.readLong((InputStream) dataIn);
5538
5539                            int overflowId =
5540                                CompressedNumber.readInt((InputStream) dataIn);
5541
5542                            // Prepare the stream for results...
5543
// create the byteHolder the size of a page, so,
5544
// that it will fit the field Data that would fit
5545
// on a page.
5546

5547                            MemByteHolder byteHolder =
5548                                new MemByteHolder(pageData.length);
5549
5550                            overflowIn = new OverflowInputStream(
5551                                byteHolder, owner, overflowPage,
5552                                overflowId, recordToLock);
5553                        }
5554
5555                        // Deal with Object columns
5556
if (column instanceof DataValueDescriptor)
5557                        {
5558                            DataValueDescriptor sColumn =
5559                                (DataValueDescriptor) column;
5560
5561                            // is the column null ?
5562
if ((fieldStatus &
5563                                        StoredFieldHeader.FIELD_NULL) == 0)
5564                            {
5565                                // the field is not null.
5566

5567                                // set the limit for the user read
5568
if (!isOverflow)
5569                                {
5570                                    // normal, non-overflow column case.
5571

5572                                    dataIn.setLimit(fieldDataLength);
5573                                    inUserCode = dataIn;
5574                                    sColumn.readExternalFromArray(dataIn);
5575                                    inUserCode = null;
5576                                    int unread = dataIn.clearLimit();
5577                                    if (unread != 0)
5578                                        dataIn.skipBytes(unread);
5579                                }
5580                                else
5581                                {
5582                                    // column being fetched is a long column.
5583

5584                                    FormatIdInputStream newIn =
5585                                        new FormatIdInputStream(overflowIn);
5586
5587                                    // long columns are fetched as a stream.
5588

5589                                    boolean fetchStream = true;
5590
5591                                    if (!(sColumn instanceof StreamStorable))
5592                                    {
5593                                        fetchStream = false;
5594                                    }
5595
5596                                    if (fetchStream)
5597                                    {
5598                                        ((StreamStorable) sColumn).setStream(
5599                                                                         newIn);
5600                                    }
5601                                    else
5602                                    {
5603                                        inUserCode = newIn;
5604                                        sColumn.readExternal(newIn);
5605                                        inUserCode = null;
5606                                    }
5607                                }
5608                            }
5609                            else
5610                            {
5611                                sColumn.restoreToNull();
5612                            }
5613
5614                        }
5615                        else
5616                        {
5617
5618                            // At this point only non-Storable columns.
5619

5620                            if (StoredFieldHeader.isNull(fieldStatus))
5621                            {
5622                                // Only Storables can be null ...
5623

5624                                throw StandardException.newException(
5625                                        SQLState.DATA_NULL_STORABLE_COLUMN,
5626                                        Integer.toString(columnId));
5627                            }
5628
5629                            // This is a non-extensible field, which means the
5630
// caller must know the correct type and thus the
5631
// element in row is the correct type or null. It
5632
// must be Serializable.
5633
//
5634
// We do not support Externalizable here.
5635

5636                            dataIn.setLimit(fieldDataLength);
5637                            inUserCode = dataIn;
5638                                    // RESOLVE (no non-storables?)
5639
row[columnId] = (Object JavaDoc) dataIn.readObject();
5640                            inUserCode = null;
5641                            int unread = dataIn.clearLimit();
5642                            if (unread != 0)
5643                                dataIn.skipBytes(unread);
5644                        }
5645                    }
5646                    else
5647                    {
5648                        // column is non-existent.
5649

5650                        if (column instanceof DataValueDescriptor)
5651                        {
5652                            // RESOLVE - This is in place for 1.2. In the future
5653
// we may want to return this column as non-existent
5654
// even if it is a storable column, or maybe use a
5655
// supplied default.
5656
((DataValueDescriptor) column).restoreToNull();
5657                        }
5658                        else
5659                        {
5660                            row[columnId] = null;
5661                        }
5662                    }
5663
5664                    // move the counter to point to beginning of next field.
5665
offset_to_field_data = dataIn.getPosition();
5666                }
5667                else
5668                {
5669                    // field is non-existent
5670
Object JavaDoc column = row[columnId];
5671
5672                    if (column instanceof DataValueDescriptor)
5673                    {
5674                        // RESOLVE - This is in place for 1.2. In the future
5675
// we may want to return this column as non-existent
5676
// even if it is a storable column, or maybe use a
5677
// supplied default.
5678

5679                        ((DataValueDescriptor) column).restoreToNull();
5680                    }
5681                    else
5682                    {
5683                        row[columnId] = null;
5684                    }
5685                }
5686            }
5687
5688            // if the last column on this page is bigger than the highest
5689
// column we are looking for, then we are done restoring the record.
5690

5691            if ((numberFields + startColumn) > max_colid)
5692                return true;
5693            else
5694                return false;
5695
5696        }
5697        catch (IOException JavaDoc ioe)
5698        {
5699            // an exception during the restore of a user column, this doesn't
5700
// make the database corrupt, just that this field is inaccessable
5701

5702            if (inUserCode != null)
5703            {
5704                dataIn.clearLimit();
5705
5706                if (ioe instanceof EOFException JavaDoc)
5707                {
5708                    if (SanityManager.DEBUG)
5709                    {
5710                        SanityManager.DEBUG_PRINT("DEBUG_TRACE",
5711                            "StoredPage - EOF while restoring record: " +
5712                                recordHeader +
5713                            "Page dump = " + this);
5714                    }
5715
5716                    // going beyond the limit in a DataInput class results in
5717
// an EOFException when it sees the -1 from a read
5718
throw StandardException.newException(
5719                            SQLState.DATA_STORABLE_READ_MISMATCH,
5720                            ioe, inUserCode.getErrorInfo());
5721                }
5722
5723                // some SQLData error reporting
5724
Exception JavaDoc ne = inUserCode.getNestedException();
5725                if (ne != null)
5726                {
5727                    if (ne instanceof InstantiationException JavaDoc)
5728                    {
5729                        throw StandardException.newException(
5730                            SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION,
5731                            ne, inUserCode.getErrorInfo());
5732                    }
5733
5734                    if (ne instanceof IllegalAccessException JavaDoc)
5735                    {
5736                        throw StandardException.newException(
5737                            SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION,
5738                            ne, inUserCode.getErrorInfo());
5739                    }
5740
5741                    if (ne instanceof StandardException)
5742                    {
5743                        throw (StandardException) ne;
5744                    }
5745                }
5746
5747                throw StandardException.newException(
5748                        SQLState.DATA_STORABLE_READ_EXCEPTION,
5749                        ioe, inUserCode.getErrorInfo());
5750            }
5751
5752            // re-throw to higher levels so they can put it in correct context.
5753
throw ioe;
5754
5755        }
5756        catch (ClassNotFoundException JavaDoc cnfe)
5757        {
5758            dataIn.clearLimit();
5759
5760            // an exception during the restore of a user column, this doesn't
5761
// make the database corrupt, just that this field is inaccessable
5762
throw StandardException.newException(
5763                    SQLState.DATA_STORABLE_READ_MISSING_CLASS,
5764                    cnfe, inUserCode.getErrorInfo());
5765
5766        }
5767        catch (LinkageError JavaDoc le)
5768        {
5769            // Some error during the link of a user class
5770
if (inUserCode != null)
5771            {
5772                dataIn.clearLimit();
5773
5774                throw StandardException.newException(
5775                        SQLState.DATA_STORABLE_READ_EXCEPTION,
5776                        le, inUserCode.getErrorInfo());
5777            }
5778            throw le;
5779        }
5780
5781    }
5782
    /**
     * Restore a portion of a long column.
     * <p>
     * Restore a portion of a long column - user must supply two streams on top
     * of the same data, one implements ObjectInput interface that knows how to
     * restore the object, the other one implements LimitInputStream.
     * <p>
     * The overflow chunk on this page is a record with either 1 field (the
     * final segment of the column) or 2 fields (a data segment plus an
     * overflow pointer to the page holding the next segment).
     *
     * @param fetchStream the stream to read the next portion of long col from
     *
     * @exception StandardException Standard exception policy.
     **/
    public void restorePortionLongColumn(
    OverflowInputStream fetchStream)
        throws StandardException, IOException
    {
        // Locate the overflow record on this page by its record id.
        int slot =
            findRecordById(fetchStream.getOverflowId(), FIRST_SLOT_NUMBER);

        StoredRecordHeader recordHeader = getHeaderAtSlot(slot);

        int offset       = getRecordOffset(slot);
        int numberFields = recordHeader.getNumberFields();

        if (SanityManager.DEBUG)
        {
            if ((numberFields > 2) || (numberFields < 1))
            {
                SanityManager.THROWASSERT(
                    "longColumn record header must have 1 or 2 fields." +
                    "numberFields = " + numberFields);
            }
        }

        // Position just past the record header, at the first field header.
        rawDataIn.setPosition(offset + recordHeader.size());

        int fieldStatus =
            StoredFieldHeader.readStatus(rawDataIn);
        int fieldDataLength =
            StoredFieldHeader.readFieldDataLength(
                rawDataIn, fieldStatus, slotFieldSize);

        // read the data portion of this segment from the stream and append
        // it to the caller's byte holder.
        ByteHolder bh = fetchStream.getByteHolder();
        bh.write(rawDataIn, fieldDataLength);
        fetchStream.setByteHolder(bh);

        // set the next overflow pointer in the stream...
        if (numberFields == 1)
        {
            // this is the last bit of the long column; -1 marks end of chain.
            fetchStream.setOverflowPage(-1);
            fetchStream.setOverflowId(-1);
        }
        else
        {
            int firstFieldStatus = fieldStatus; // for DEBUG check

            // get the field status and data length of the overflow pointer.
            fieldStatus =
                StoredFieldHeader.readStatus(rawDataIn);
            fieldDataLength =
                StoredFieldHeader.readFieldDataLength(
                    rawDataIn, fieldStatus, slotFieldSize);

            if (SanityManager.DEBUG)
            {
                if (!StoredFieldHeader.isOverflow(fieldStatus))
                {
                    // In version 1.5, the first field is overflow and the
                    // second is not.  In version 2.0 onwards, the first
                    // field is not overflow and the second is overflow
                    // (the overflow bit goes with the overflow pointer).
                    // Check first field to make sure its overflow bit is
                    // set on.
                    SanityManager.ASSERT(
                        StoredFieldHeader.isOverflow(firstFieldStatus));
                }
            }

            // The overflow pointer is a compressed (page number, record id)
            // pair pointing at the next segment of the column.
            long overflowPage =
                CompressedNumber.readLong((InputStream) rawDataIn);
            int overflowId =
                CompressedNumber.readInt((InputStream) rawDataIn);

            // there is more after this chunk.
            fetchStream.setOverflowPage(overflowPage);
            fetchStream.setOverflowId(overflowId);
        }
    }
5873
5874
    /**
     * Log a Storable to a stream.
     * <p>
     * Log a Storable into a stream.  This is used by update field operations.
     * <P>
     * Write the column in its field format to the stream.  Field format is a
     * field header followed by the data of the column as defined by the data
     * itself.  See this class's description for the specifics of the header.
     *
     * @param slot              slot of the record containing the field
     * @param fieldId           id of the field to replace
     * @param column            the new column value to log
     * @param out               stream to write the field format to
     * @param overflowThreshold used to decide when a column is "long"
     *
     * @exception StandardException Standard Cloudscape error policy
     * @exception IOException       RESOLVE
     **/
    public void logColumn(
    int slot,
    int fieldId,
    Object column,
    DynamicByteArrayOutputStream out,
    int overflowThreshold)
        throws StandardException, IOException
    {
        // calculate the space available on the page, it includes:
        //   + the free space
        //   + the space the record has reserved but not used
        //   + the length of the old field itself

        // free space
        int bytesAvailable = freeSpace;
        int beginPosition  = -1;

        // space reserved, but not used by the record
        bytesAvailable += getReservedCount(slot);

        // The size of the old field is also available for the new field.
        rawDataIn.setPosition(getFieldOffset(slot, fieldId));

        int fieldStatus =
            StoredFieldHeader.readStatus(rawDataIn);
        int fieldDataLength =
            StoredFieldHeader.readFieldDataLength(
                rawDataIn, fieldStatus, slotFieldSize);

        bytesAvailable +=
            StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize)
                + fieldDataLength;

        try
        {
            setOutputStream(out);
            beginPosition = rawDataOut.getPosition();

            // Wrap the single column so the generic row-based logColumn
            // helper can process it (COLUMN_NONE: not first, not long).
            Object[] row = new Object[1];
            row[0] = column;

            // logColumn returns its spaceAvailable argument unchanged when
            // the column did not fit; that means there is no room on page.
            if (bytesAvailable == logColumn(
                                        row, 0, out, bytesAvailable,
                                        COLUMN_NONE, overflowThreshold))
            {
                throw new NoSpaceOnPage(isOverflowPage());
            }

        }
        finally
        {
            // Always restore stream position and detach the output stream,
            // even if the column did not fit.
            rawDataOut.setPosition(beginPosition);
            resetOutputStream();
        }
    }
5941
    /**
     * Log a long column into a DataOuput.
     * <p>
     * Log a long column into a DataOuput.  This is used by insert operations.
     * <P>
     * Write the column in its field format to the stream.  Field format is a
     * field header followed by the data of the column as defined by the data
     * itself.  See this class's description for the specifics of the header.
     *
     * @param slot     slot of the row with the column
     * @param recordId record id of the long column chunk being logged
     * @param column   the object form of the column to log
     * @param out      where to log to the column to.
     *
     * @exception StandardException Standard Cloudscape error policy
     * @exception IOException       I/O exception from writing to an array.
     *
     * @see BasePage#logColumn
     **/
    public int logLongColumn(
    int slot,
    int recordId,
    Object column,
    DynamicByteArrayOutputStream out)
        throws StandardException, IOException
    {
        int spaceAvailable = freeSpace;

        // need to account for the slot table using extra space...
        spaceAvailable -= slotEntrySize;

        // <= is ok here as we know we want to write at least one more byte
        if (spaceAvailable <= 0)
            throw new NoSpaceOnPage(isOverflowPage());

        setOutputStream(out);
        int beginPosition = out.getPosition();

        try
        {
            // in the long column portion on the new page there will be 1 field
            // if the portion fits on the page (2 if it needs another pointer
            // to continue to yet another page).
            int numberFields = 1;

            StoredRecordHeader recordHeader =
                new StoredRecordHeader(recordId, numberFields);

            int recordHeaderLength = recordHeader.write(logicalDataOut);

            spaceAvailable -= recordHeaderLength;

            if (spaceAvailable < 0)
            {
                // this part of long column won't totally fit on page, it
                // needs to be linked to another page.  Throw exception and
                // caller will handle logging an overflow column portion
                // with a forward pointer.
                throw new NoSpaceOnPage(isOverflowPage());
            }
            else
            {
                // the rest of the long column fits on the page!
                // COLUMN_LONG with threshold 100: write as much as fits,
                // return -1 if the column is done, 1 if more remains.
                Object[] row = new Object[1];
                row[0] = column;
                return logColumn(row, 0, out, spaceAvailable, COLUMN_LONG, 100);
            }

        }
        finally
        {
            resetOutputStream();
        }
    }
6018
    /**
     * Log column from input row to the given output stream.
     * <p>
     * Read data from row[arrayPosition], and write the column data in
     * raw store page format to the given column.  Along the way determine
     * if the column will fit on the current page.
     * <p>
     * Action taken in this routine is determined by the kind of column as
     * specified in the columnFlag:
     *     COLUMN_NONE  - the column is insignificant
     *     COLUMN_FIRST - this is the first column in a logRow() call
     *     COLUMN_LONG  - this is a known long column, therefore we will
     *                    store part of the column on the current page and
     *                    overflow the rest if necessary.
     * <p>
     * Upon entry to this routine logicalDataOut is tied to the
     * DynamicByteArrayOutputStream out.
     * <BR>
     * If a column is a long column and it does not totally fit on the current
     * page, then a LongColumnException is thrown.  We package up info about
     * the current long column in the partially filled in exception so that
     * callers can take correct action.  The column will now be set a as a
     * stream.
     *
     * @return The spaceAvailable after accounting for space for this column;
     *         for COLUMN_LONG the return is -1 (column done) or 1 (more to
     *         write), see comment at end of method.
     *
     * @param row           array of column from which to read the column from.
     * @param arrayPosition The array position of column to be reading from row.
     * @param out           The stream to write the raw store page format of the
     *                      the column to.
     * @param spaceAvailable The number of bytes available on the page for
     *                       this column, this may differ from current page
     *                       as it may include bytes used by previous
     *                       columns.
     * @param columnFlag    one of: COLUMN_NONE, COLUMN_FIRST, or COLUMN_LONG.
     *
     * @exception StandardException Standard exception policy.
     * @exception LongColumnException Thrown if column will not fit on a
     *                                single page.  See notes above.
     **/
    private int logColumn(
    Object[] row,
    int arrayPosition,
    DynamicByteArrayOutputStream out,
    int spaceAvailable,
    int columnFlag,
    int overflowThreshold)
        throws StandardException, IOException
    {
        // RESOLVE (mikem) - why will row be null?
        Object column = (row != null ? row[arrayPosition] : null);

        // Check to see if the data comes from a page, if it is, then the field
        // header is already formatted.
        if (column instanceof RawField)
        {
            // field data is raw, no need to set up a field header etc.
            byte[] data = ((RawField) column).getData();

            if (data.length <= spaceAvailable)
            {
                out.write(data);
                spaceAvailable -= data.length;
            }
            // NOTE: if the raw data did not fit, spaceAvailable is returned
            // unchanged; callers detect "did not fit" by that equality.
            return spaceAvailable;
        }

        // If this is a long column, it may fit in this page or it may not.
        boolean longColumnDone = true;

        // default field status.
        int fieldStatus =
            StoredFieldHeader.setFixed(StoredFieldHeader.setInitial(), true);

        int beginPosition       = out.getPosition();
        int columnBeginPosition = 0;
        int headerLength;
        int fieldDataLength     = 0;

        // If the column is a StreamStorable that currently carries a stream,
        // log from the stream rather than the object form.
        if (column instanceof StreamStorable)
        {
            StreamStorable stream_storable_column = (StreamStorable) column;

            if (stream_storable_column.returnStream() != null)
            {
                column =
                    (Object) stream_storable_column.returnStream();
            }
        }

        if (column == null)
        {
            // Non-existent field: only a header is written, no data.
            fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
            headerLength =
                StoredFieldHeader.write(
                    logicalDataOut, fieldStatus,
                    fieldDataLength, slotFieldSize);
        }
        else if (column instanceof InputStream)
        {
            RememberBytesInputStream bufferedIn = null;
            int bufferLen = 0;

            int estimatedMaxDataSize =
                getMaxDataLength(spaceAvailable, overflowThreshold);

            // if column is already instanceof RememberBytesInputStream, then we
            // need to find out how many bytes have already been stored in the
            // buffer.
            if (column instanceof RememberBytesInputStream)
            {
                // data is already RememberBytesInputStream
                bufferedIn = (RememberBytesInputStream) column;
                bufferLen  = bufferedIn.numBytesSaved();
            }
            else
            {
                // data comes in as an inputstream
                bufferedIn = new RememberBytesInputStream(
                    (InputStream) column, new MemByteHolder(maxFieldSize + 1));

                // always set stream of InputStream to RememberBytesInputStream
                // so that all future access to this column will be able to
                // get at the bytes drained from the InputStream, and copied
                // into the RememberBytesInputStream.
                if (row[arrayPosition] instanceof StreamStorable)
                    ((StreamStorable)row[arrayPosition]).setStream(bufferedIn);

                // set column to the RememberBytesInputStream so that
                // all future access to this column will be able to get
                // at bytes that have been already read.  This assignment
                // is needed to ensure that if long column exception is
                // thrown, the column is set correctly
                column = bufferedIn;
            }

            // read the buffer by reading the max we can read.
            if (bufferLen < (estimatedMaxDataSize + 1))
            {
                bufferLen +=
                    bufferedIn.fillBuf(estimatedMaxDataSize + 1 - bufferLen);
            }

            if ((bufferLen <= estimatedMaxDataSize))
            {
                // we will be able to fit this into the page
                fieldDataLength = bufferLen;
                fieldStatus     = StoredFieldHeader.setFixed(fieldStatus, true);
                headerLength    = StoredFieldHeader.write(
                                        logicalDataOut, fieldStatus,
                                        fieldDataLength, slotFieldSize);

                // if the field is extensible, then we write the serializable
                // formatId.  if the field is non-extensible, we don't need to
                // write the formatId.  but at this point, how do we know
                // whether the field is extensible or not???  For Plato release,
                // we do not support InputStream on extensible types,
                // therefore, we ignore the formatId for now.
                bufferedIn.putBuf(logicalDataOut, fieldDataLength);
            }
            else
            {
                // current column will not fit into the current page.
                if (columnFlag == COLUMN_LONG)
                {
                    // column is a long column and the remaining portion does
                    // not fit on the current page.
                    longColumnDone = false;

                    // it's a portion of a long column, and there is more to
                    // write.  reserve enough room for overflow pointer, then
                    // write as much data as we can leaving an extra 2 bytes
                    // for overflow field header.
                    fieldDataLength =
                        estimatedMaxDataSize - OVERFLOW_POINTER_SIZE - 2;
                    fieldStatus =
                        StoredFieldHeader.setFixed(fieldStatus, true);

                    headerLength =
                        StoredFieldHeader.write(
                            logicalDataOut, fieldStatus,
                            fieldDataLength, slotFieldSize);
                    bufferedIn.putBuf(logicalDataOut, fieldDataLength);

                    // now, we need to adjust the buffer, move the unread
                    // bytes to the beginning, position the cursor correctly,
                    // so, next time around, we can read more into the buffer.
                    int remainingBytes = bufferedIn.available();

                    // move the unread bytes to the beginning of the byteHolder.
                    int bytesShifted = bufferedIn.shiftToFront();

                }
                else
                {
                    // column not a long column and does not fit on page.
                    int delta = maxFieldSize - bufferLen + 1;

                    if (delta > 0)
                        bufferLen += bufferedIn.fillBuf(delta);
                    fieldDataLength = bufferLen;

                    // the data will not fit on this page make sure the new
                    // input stream is passed back to the upper layer...
                    column = (Object) bufferedIn;
                }
            }

        }
        else if (column instanceof DataValueDescriptor)
        {
            DataValueDescriptor sColumn = (DataValueDescriptor) column;

            boolean isNull = sColumn.isNull();
            if (isNull)
            {
                fieldStatus = StoredFieldHeader.setNull(fieldStatus, true);
            }

            // header is written with 0 length here; the real length is
            // patched in after the data is written (see end of method).
            headerLength =
                StoredFieldHeader.write(
                    logicalDataOut, fieldStatus,
                    fieldDataLength, slotFieldSize);

            if (!isNull)
            {
                // write the field data to the log
                try
                {
                    columnBeginPosition = out.getPosition();
                    sColumn.writeExternal(logicalDataOut);
                }
                catch (IOException ioe)
                {
                    // SQLData error reporting
                    if (logicalDataOut != null)
                    {
                        Exception ne = logicalDataOut.getNestedException();
                        if (ne != null)
                        {
                            if (ne instanceof StandardException)
                            {
                                throw (StandardException) ne;
                            }
                        }
                    }

                    throw StandardException.newException(
                            SQLState.DATA_STORABLE_WRITE_EXCEPTION, ioe);
                }

                fieldDataLength =
                    (out.getPosition() - beginPosition) - headerLength;
            }
        }
        else if (column instanceof RecordHandle)
        {
            // we are inserting an overflow pointer for a long column

            // casted reference to column to avoid repeated casting.
            RecordHandle overflowHandle = (RecordHandle) column;

            fieldStatus = StoredFieldHeader.setOverflow(fieldStatus, true);
            headerLength =
                StoredFieldHeader.write(
                    logicalDataOut, fieldStatus,
                    fieldDataLength, slotFieldSize);

            fieldDataLength +=
                CompressedNumber.writeLong(out, overflowHandle.getPageNumber());
            fieldDataLength +=
                CompressedNumber.writeInt(out, overflowHandle.getId());

        }
        else
        {
            // Serializable/Externalizable/Formattable
            // all look the same at this point.

            // header is written with 0 length here.
            headerLength =
                StoredFieldHeader.write(
                    logicalDataOut, fieldStatus,
                    fieldDataLength, slotFieldSize);

            logicalDataOut.writeObject(column);
            fieldDataLength =
                (out.getPosition() - beginPosition) - headerLength;
        }

        // calculate the size of the field on page with compressed field header
        fieldStatus = StoredFieldHeader.setFixed(fieldStatus, false);
        int fieldSizeOnPage =
            StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize)
            + fieldDataLength;

        userRowSize += fieldDataLength;

        boolean fieldIsLong = isLong(fieldSizeOnPage, overflowThreshold);

        // Do we have enough space on the page for this field?
        if (((spaceAvailable < fieldSizeOnPage) || (fieldIsLong)) &&
            (columnFlag != COLUMN_LONG))
        {
            // Column was not long before getting here and does not fit.
            if (fieldIsLong)
            {
                // long column, and this first time we have figured out this
                // column is long.
                if (!(column instanceof InputStream))
                {
                    // Convert already written object to an InputStream.
                    ByteArray fieldData =
                        new ByteArray(
                            ((DynamicByteArrayOutputStream)out).getByteArray(),
                            (columnBeginPosition), fieldDataLength);

                    ByteArrayInputStream columnIn =
                        new ByteArrayInputStream(
                            fieldData.getArray(), columnBeginPosition,
                            fieldDataLength);

                    MemByteHolder byteHolder =
                        new MemByteHolder(fieldDataLength + 1);

                    RememberBytesInputStream bufferedIn =
                        new RememberBytesInputStream(columnIn, byteHolder);

                    // the data will not fit on this page make sure the new
                    // input stream is passed back to the upper layer...
                    column = bufferedIn;
                }

                // Rewind: nothing for this field stays on the page.
                out.setPosition(beginPosition);

                // This exception carries the information for the client
                // routine to continue inserting the long row on multiple
                // pages.
                LongColumnException lce = new LongColumnException();
                lce.setColumn(column);
                throw lce;

            }
            else
            {
                // Column does not fit on this page, but it isn't a long column.
                // Rewind and return spaceAvailable unchanged so the caller
                // can detect that nothing was written.
                out.setPosition(beginPosition);
                return(spaceAvailable);
            }
        }

        // Now we go back to update the fieldDataLength in the field header
        out.setPosition(beginPosition);

        // slotFieldSize is set based on the pageSize.
        // We are borrowing this to set the size of our fieldDataLength.
        fieldStatus  = StoredFieldHeader.setFixed(fieldStatus, true);
        headerLength = StoredFieldHeader.write(
                            out, fieldStatus, fieldDataLength, slotFieldSize);

        // set position to the end of the field
        out.setPosition(beginPosition + fieldDataLength + headerLength);

        spaceAvailable -= fieldSizeOnPage;

        // YYZ: revisit
        if (columnFlag == COLUMN_LONG)
        {
            // if we are logging a long column, we don't care how much space
            // is left on the page, instead, we care whether we are done with
            // the column or not.  So, here, we want to return 1 if we are
            // not done, and return -1 if we are done.
            // If logColumn returns -1, that flag is returned all the way to
            // BasePage.insertLongColumn to signal end of loop.
            if (longColumnDone)
                return -1;
            else
                return 1;
        } else
        {
            return (spaceAvailable);
        }
    }
6414
    /**
     * Create and write a long row header to the log stream.
     * <p>
     * Called to log a new overflow record, will check for space available
     * and throw an exception if the record header will not fit on the page.
     * <p>
     *
     * @return -1
     *
     * @param slot           slot of record to log.
     * @param spaceAvailable spaceAvaliable on page.
     * @param out            stream to log the record to.
     *
     * @exception StandardException Standard exception policy.
     **/
    private int logOverflowRecord(
    int slot,
    int spaceAvailable,
    DynamicByteArrayOutputStream out)
        throws StandardException, IOException
    {
        setOutputStream(out);

        StoredRecordHeader pageRecordHeader = getHeaderAtSlot(slot);

        // Build an overflow record header that points at the continuation
        // of this record (page/id fields copied from the current header).
        StoredRecordHeader overflow_rh = getOverFlowRecordHeader();
        overflow_rh.setOverflowFields(pageRecordHeader);

        if (SanityManager.DEBUG)
        {
            SanityManager.ASSERT(overflow_rh.getOverflowPage() != 0);
        }

        /*
        // #1 situation,
        // we want to update the header to just an overflow pointer with no data
        // so, update the recordHeader, and we are done...
        if (!overflow_rh.isPartialOverflow()) {
            // this recordHeader becomes just a overflow pointer,
            // we need to make sure that the number of fields is set to 0.
            overflow_rh.setNumberFields(0);
            
            spaceAvailable -= overflow_rh.write(logicalDataOut);

            if (spaceAvailable < 0) {
                throw new NoSpaceOnPage(isOverflowPage());
            }

            resetOutputStream();

            return (-1);
        }
        */

        // #2 situation,
        // we want to only update the recordheader of the page, while leaving
        // the data of the record on the page.  Just update the header part and
        // then arrange for the data part to move to after the new header.

        int oldSize = pageRecordHeader.size();
        int newSize = overflow_rh.size();

        if (oldSize < newSize)
        {
            // need extra room...
            int delta = newSize - oldSize;
            if (spaceAvailable < delta)
            {
                throw new NoSpaceOnPage(isOverflowPage());
            }
        }

        // write the new overflow_rh for the record.
        overflow_rh.write(logicalDataOut);

        // now, log the data
        logRecordDataPortion(
            slot, LOG_RECORD_DEFAULT, pageRecordHeader,
            (FormatableBitSet) null, logicalDataOut, (RecordHandle)null);

        return (-1);
    }
6497
6498    private int logOverflowField(
6499    DynamicByteArrayOutputStream out,
6500    int spaceAvailable,
6501    long overflowPage,
6502    int overflowId)
6503        throws StandardException, IOException JavaDoc
6504    {
6505        int fieldStatus =
6506            StoredFieldHeader.setOverflow(
6507                StoredFieldHeader.setInitial(), true);
6508
6509        int fieldSizeOnPage =
6510            CompressedNumber.sizeLong(overflowPage) +
6511            CompressedNumber.sizeInt(overflowId);
6512
6513        int fieldDataLength = fieldSizeOnPage;
6514
6515        fieldSizeOnPage +=
6516            StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize);
6517
6518        // need to check that we have room on the page for this.
6519
spaceAvailable -= fieldSizeOnPage;
6520
6521        // what if there is not enough room for the overflow pointer?
6522
if (spaceAvailable < 0)
6523            throw new NoSpaceOnPage(isOverflowPage());
6524
6525        // write the field to the page:
6526
StoredFieldHeader.write(
6527            logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
6528        CompressedNumber.writeLong(out, overflowPage);
6529        CompressedNumber.writeInt(out, overflowId);
6530
6531        // return the available bytes
6532
return(spaceAvailable);
6533    }
6534
6535    /**
6536     * Log a record to the ObjectOutput stream.
6537     * <p>
6538     * Write out the complete on-page record to the store stream. Data is
6539     * preceeded by a compressed int that gives the length of the following
6540     * data.
6541     *
6542     * @exception StandardException Standard Cloudscape error policy
6543     * @exception IOException on error writing to log stream.
6544     *
6545     * @see BasePage#logRecord
6546     **/

6547    public void logRecord(
6548    int slot,
6549    int flag,
6550    int recordId,
6551    FormatableBitSet validColumns,
6552    OutputStream out,
6553    RecordHandle headRowHandle)
6554        throws StandardException, IOException JavaDoc
6555    {
6556        StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
6557
6558        if (recordId != recordHeader.getId())
6559        {
6560            // the record is being logged under a different identifier,
6561
// write it out with the correct identifier
6562
StoredRecordHeader newRecordHeader =
6563                new StoredRecordHeader(recordHeader);
6564
6565            newRecordHeader.setId(recordId);
6566
6567            newRecordHeader.write(out);
6568            newRecordHeader = null;
6569        }
6570        else
6571        {
6572            // write the original record header
6573
recordHeader.write(out);
6574        }
6575
6576        logRecordDataPortion(
6577            slot, flag, recordHeader, validColumns, out, headRowHandle);
6578
6579    }
6580
6581    private void logRecordDataPortion(
6582    int slot,
6583    int flag,
6584    StoredRecordHeader recordHeader,
6585    FormatableBitSet validColumns,
6586    OutputStream out,
6587    RecordHandle headRowHandle)
6588        throws StandardException, IOException JavaDoc
6589    {
6590        int offset = getRecordOffset(slot);
6591
6592        // now skip over the original header before writing the data
6593
int oldHeaderLength = recordHeader.size();
6594        offset += oldHeaderLength;
6595
6596        // write out the record data (FH+data+...) from the page data
6597
int startField = recordHeader.getFirstField();
6598        int endField = startField + recordHeader.getNumberFields();
6599        int validColumnsSize = (validColumns == null) ? 0 : validColumns.getLength();
6600
6601        for (int fieldId = startField; fieldId < endField; fieldId++) {
6602
6603            rawDataIn.setPosition(offset);
6604
6605            // get the field header information from the page
6606
int fieldStatus = StoredFieldHeader.readStatus(rawDataIn);
6607            int fieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, fieldStatus, slotFieldSize);
6608
6609            // see if this field needs to be logged
6610
// no need to write the data portion if the log is getting written
6611
// for purges unless the field is overflow pointer for a long column.
6612
if (((validColumns != null) && !(validColumnsSize > fieldId && validColumns.isSet(fieldId))) ||
6613                ((flag & BasePage.LOG_RECORD_FOR_PURGE)!=0 && !StoredFieldHeader.isOverflow(fieldStatus)))
6614            {
6615                // nope, move page offset along
6616
offset += StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize);
6617                offset += fieldDataLength;
6618
6619                // write a non-existent field
6620
fieldStatus = StoredFieldHeader.setInitial();
6621                fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
6622                StoredFieldHeader.write(out, fieldStatus, 0, slotFieldSize);
6623                continue;
6624            }
6625
6626            // If this field is to be updated, and it points to a long column
6627
// chain, the entire long column chain will be orphaned after the
6628
// update operation. Therefore, need to queue up a post commit
6629
// work to reclaim the long column chain. We cannot do any clean
6630
// up in this transaction now because we are underneath a log
6631
// action and cannot interrupt the transaction log buffer.
6632
// HeadRowHandle may be null if updateAtSlot is called to update a
6633
// non-head row piece. In that case, don't do anything.
6634
// If temp container, don't do anything.
6635
if (((flag & BasePage.LOG_RECORD_FOR_UPDATE) != 0) &&
6636                headRowHandle != null &&
6637                StoredFieldHeader.isOverflow(fieldStatus) &&
6638                owner.isTemporaryContainer() == false)
6639            {
6640
6641                int saveOffset = rawDataIn.getPosition(); // remember the page offset
6642
long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);
6643                int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);
6644
6645                // Remember the time stamp on the first page of the column
6646
// chain. This is to prevent the case where the post commit
6647
// work gets fired twice, in that case, the second time it is
6648
// fired, this overflow page may not part of this row chain
6649
// that is being updated.
6650
Page firstPageOnColumnChain = getOverflowPage(overflowPage);
6651                PageTimeStamp ts = firstPageOnColumnChain.currentTimeStamp();
6652                firstPageOnColumnChain.unlatch();
6653
6654                RawTransaction rxact = (RawTransaction)owner.getTransaction();
6655
6656                ReclaimSpace work =
6657                    new ReclaimSpace(ReclaimSpace.COLUMN_CHAIN,
6658                                headRowHandle,
6659                                fieldId, // long column about to be orphaned by update
6660
overflowPage, // page where the long column starts
6661
overflowId, // record Id of the beginning of the long column
6662
ts,
6663                                rxact.getDataFactory(), true);
6664
6665                rxact.addPostCommitWork(work);
6666
6667                rawDataIn.setPosition(saveOffset); // Just to be safe, reset data stream
6668
}
6669
6670
6671            // write the field header for the log
6672
offset += StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);
6673
6674            if (fieldDataLength != 0) {
6675
6676                // write the actual data
6677
out.write(pageData, offset, fieldDataLength);
6678
6679                offset += fieldDataLength;
6680            }
6681        }
6682    }
6683
6684    /**
6685        Log a field to the ObjectOutput stream.
6686        <P>
6687        Find the field in the record and then write out the complete
6688        field, i.e. header and data.
6689
6690        @exception StandardException Standard Cloudscape error policy
6691        @exception IOException RESOLVE
6692
6693        @see BasePage#logField
6694    */

6695
6696    public void logField(int slot, int fieldNumber, OutputStream out)
6697        throws StandardException, IOException JavaDoc
6698    {
6699        int offset = getFieldOffset(slot, fieldNumber);
6700
6701        // these reads are always against the page array
6702
ArrayInputStream lrdi = rawDataIn;
6703
6704        // now write out the field we are interested in ...
6705
lrdi.setPosition(offset);
6706        int fieldStatus = StoredFieldHeader.readStatus(lrdi);
6707        int fieldDataLength = StoredFieldHeader.readFieldDataLength(lrdi, fieldStatus, slotFieldSize);
6708
6709        StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);
6710        
6711        if (fieldDataLength != 0) {
6712            // and then the data
6713
out.write(pageData, lrdi.getPosition(), fieldDataLength);
6714        }
6715    }
6716
    /*
    ** Overridden methods of BasePage
    */

6720
6721    /**
6722        Override insertAtSlot to provide long row support.
6723        @exception StandardException Standard Cloudscape error policy
6724    */

6725    public RecordHandle insertAtSlot(
6726    int slot,
6727    Object JavaDoc[] row,
6728    FormatableBitSet validColumns,
6729    LogicalUndo undo,
6730    byte insertFlag,
6731    int overflowThreshold)
6732        throws StandardException
6733    {
6734        try {
6735
6736            return super.insertAtSlot(slot, row, validColumns, undo, insertFlag, overflowThreshold);
6737
6738        } catch (NoSpaceOnPage nsop) {
6739
6740            // Super class already handle the case of insert that allows overflow.
6741
// If we get here, we know that the insert should not allow overflow.
6742
// Possibles causes:
6743
// 1. insert to an empty page, row will never fit (ie long row)
6744
// 2. insert to original page
6745
// we will do:
6746
// return a null to indicate the insert cannot be accepted ..
6747
return null;
6748
6749        }
6750    }
6751    
6752
6753    /**
6754        Update field at specified slot
6755        @exception StandardException Standard Cloudscape error policy
6756    */

6757    public RecordHandle updateFieldAtSlot(
6758    int slot,
6759    int fieldId,
6760    Object JavaDoc newValue,
6761    LogicalUndo undo)
6762        throws StandardException
6763    {
6764        try {
6765
6766            return super.updateFieldAtSlot(slot, fieldId, newValue, undo);
6767
6768        } catch (NoSpaceOnPage nsop) {
6769
6770
6771            // empty page apart from the record
6772
if (slotsInUse == 1)
6773            {
6774                throw StandardException.newException(
6775                    SQLState.DATA_NO_SPACE_FOR_RECORD);
6776            }
6777            throw StandardException.newException(
6778                    SQLState.DATA_NO_SPACE_FOR_RECORD);
6779
6780/*
6781// djd if (isOverflowPage()) {
6782            }
6783
6784            return XXX;
6785*/

6786        }
6787
6788    }
6789
6790    /**
6791        Get the number of fields on the row at slot
6792        @exception StandardException Standard Cloudscape error policy
6793    */

6794    public int fetchNumFieldsAtSlot(int slot) throws StandardException
6795    {
6796
6797        StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
6798
6799        if (!recordHeader.hasOverflow())
6800            return super.fetchNumFieldsAtSlot(slot);
6801
6802        BasePage overflowPage = getOverflowPage(recordHeader.getOverflowPage());
6803        int count = overflowPage.fetchNumFieldsAtSlot(getOverflowSlot(overflowPage, recordHeader));
6804        overflowPage.unlatch();
6805        return count;
6806    }
6807
6808    /**
6809     * Move record to a page toward the beginning of the file.
6810     * <p>
6811     * As part of compressing the table records need to be moved from the
6812     * end of the file toward the beginning of the file. Only the
6813     * contiguous set of free pages at the very end of the file can
6814     * be given back to the OS. This call is used to purge the row from
6815     * the current page, insert it into a previous page, and return the
6816     * new row location
6817     * Mark the record identified by position as deleted. The record may be
6818     * undeleted sometime later using undelete() by any transaction that sees
6819     * the record.
6820     * <p>
6821     * The interface is optimized to work on a number of rows at a time,
6822     * optimally processing all rows on the page at once. The call will
6823     * process either all rows on the page, or the number of slots in the
6824     * input arrays - whichever is smaller.
6825     * <B>Locking Policy</B>
6826     * <P>
6827     * MUST be called with table locked, not locks are requested. Because
6828     * it is called with table locks the call will go ahead and purge any
6829     * row which is marked deleted. It will also use purge rather than
6830     * delete to remove the old row after it moves it to a new page. This
6831     * is ok since the table lock insures that no other transaction will
6832     * use space on the table before this transaction commits.
6833     *
6834     * <BR>
6835     * A page latch on the new page will be requested and released.
6836     *
6837     * @param slot slot of original row to move.
6838     * @param row a row template to hold all columns of row.
6839     * @param old_handle An array to be filled in by the call with the
6840     * old handles of all rows moved.
6841     * @param new_handle An array to be filled in by the call with the
6842     * new handles of all rows moved.
6843     *
6844     * @return the number of rows processed.
6845     *
6846     * @exception StandardException Standard Cloudscape error policy
6847     *
6848     **/

6849    public int moveRecordForCompressAtSlot(
6850    int slot,
6851    Object JavaDoc[] row,
6852    RecordHandle[] old_handle,
6853    RecordHandle[] new_handle)
6854        throws StandardException
6855    {
6856        long src_pageno = getPageNumber();
6857
6858        try
6859        {
6860            fetchFromSlot(
6861                null,
6862                slot,
6863                row,
6864                (FetchDescriptor) null, // all columns retrieved
6865
false);
6866
6867            int row_size = getRecordPortionLength(slot);
6868
6869            // first see if row will fit on current page being used to insert
6870
StoredPage dest_page =
6871                (StoredPage) owner.getPageForCompress(0, src_pageno);
6872
6873            if (dest_page != null)
6874            {
6875                if ((dest_page.getPageNumber() >= getPageNumber()) ||
6876                    (!dest_page.spaceForCopy(row_size)))
6877                {
6878                    // page won't work
6879
dest_page.unlatch();
6880                    dest_page = null;
6881                }
6882            }
6883
6884            if (dest_page == null)
6885            {
6886                // last page did not work, try unfilled page
6887
dest_page = (StoredPage)
6888                    owner.getPageForCompress(
6889                        ContainerHandle.GET_PAGE_UNFILLED, src_pageno);
6890
6891                if (dest_page != null)
6892                {
6893                    if ((dest_page.getPageNumber() >= getPageNumber()) ||
6894                        (!dest_page.spaceForCopy(row_size)))
6895                    {
6896                        // page won't work
6897
dest_page.unlatch();
6898                        dest_page = null;
6899                    }
6900                }
6901            }
6902
6903            if (dest_page == null)
6904            {
6905                // last and unfilled page did not work, try getting a free page
6906
dest_page = (StoredPage) owner.addPage();
6907
6908                if (dest_page.getPageNumber() >= getPageNumber())
6909                {
6910                    owner.removePage(dest_page);
6911                    dest_page = null;
6912                }
6913            }
6914
6915            if (dest_page != null)
6916            {
6917                int dest_slot = dest_page.recordCount();
6918
6919                old_handle[0] = getRecordHandleAtSlot(slot);
6920
6921                copyAndPurge(dest_page, slot, 1, dest_slot);
6922
6923                new_handle[0] = dest_page.getRecordHandleAtSlot(dest_slot);
6924
6925                dest_page.unlatch();
6926
6927                return(1);
6928            }
6929            else
6930            {
6931                return(0);
6932            }
6933        }
6934        catch (IOException JavaDoc ioe)
6935        {
6936            throw StandardException.newException(
6937                SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
6938        }
6939    }
6940
    /*
     * Methods that are called underneath a page action.
     */

6944
6945    /*
6946     * update page version and instance due to actions by a log record
6947     */

6948    public void logAction(LogInstant instant) throws StandardException
6949    {
6950        if (SanityManager.DEBUG) {
6951            SanityManager.ASSERT(isLatched());
6952        }
6953
6954        if (rawDataOut == null)
6955            createOutStreams();
6956
6957        if (!isActuallyDirty()) {
6958            // if this is not an overflow page and the page is valid, set the
6959
// initial row count.
6960
if (!isOverflowPage() && ((getPageStatus() & VALID_PAGE) != 0)) {
6961                initialRowCount = internalNonDeletedRecordCount();
6962            } else
6963                initialRowCount = 0;
6964        }
6965
6966        setDirty();
6967
6968        bumpPageVersion();
6969        updateLastLogInstant(instant);
6970    }
6971
6972
6973    /* clean the page for first use or reuse */
6974    private void cleanPage()
6975    {
6976        setDirty();
6977
6978        // set pageData to all nulls
6979
clearSection(0, getPageSize());
6980
6981        slotsInUse = 0;
6982        deletedRowCount = 0;
6983        headerOutOfDate = true; // headerOutOfDate must be set after setDirty
6984
// because isDirty maybe called unlatched
6985

6986        clearAllSpace();
6987
6988    }
6989
6990    /**
6991        Initialize the page.
6992
6993        If reuse, then
6994        Clean up any in memory or on disk structure to ready the page for reuse.
6995        This is not only reusing the page buffer, but reusing a free page
6996        which may or may not be cleaned up the the client of raw store when it
6997        was deallocated.
6998
6999        @exception StandardException Cloudscape Standard Error Policy
7000     */

7001    public void initPage(LogInstant instant, byte status, int recordId,
7002                         boolean overflow, boolean reuse)
7003         throws StandardException
7004    {
7005        // log action at the end after the page is updated with all the
7006
// pertinent information
7007
logAction(instant);
7008
7009        if (reuse)
7010        {
7011            cleanPage();
7012            super.cleanPageForReuse();
7013        }
7014        // if not reuse, createPage already called cleanpage
7015

7016        headerOutOfDate = true; // headerOutOfDate must be set after setDirty
7017
// because isDirty maybe called unlatched
7018
setPageStatus(status);
7019        isOverflowPage = overflow;
7020        nextId = recordId;
7021
7022    }
7023
7024    /**
7025        Set page status
7026        @exception StandardException Cloudscape Standard Error Policy
7027    */

7028    public void setPageStatus(LogInstant instant, byte status)
7029         throws StandardException
7030    {
7031        logAction(instant);
7032        headerOutOfDate = true; // headerOutOfDate must be set after setDirty
7033
// because isDirty maybe called unlatched
7034

7035        setPageStatus(status);
7036    }
7037
7038    /**
7039        Set the row reserved space.
7040        @exception StandardException Cloudscape Standard Error Policy
7041     */

7042    public void setReservedSpace(LogInstant instant, int slot, int value)
7043         throws StandardException, IOException JavaDoc
7044    {
7045        logAction(instant);
7046        headerOutOfDate = true; // headerOutOfDate must be set after setDirty
7047
// because isDirty maybe called unlatched
7048

7049        int delta = value - getReservedCount(slot);
7050
7051        if (SanityManager.DEBUG) {
7052            SanityManager.ASSERT(delta <= freeSpace,
7053                "Cannot grow reserved space because there is not enough free space on the page");
7054            SanityManager.ASSERT(delta != 0,
7055                "Set Reserved Space called to set identical value");
7056
7057            if (value < 0)
7058                SanityManager.THROWASSERT(
7059                    "Cannot set reserved space to value " + value);
7060        }
7061
7062        // Find the end of the record that we are about to add or subtract from
7063
// the reserved space.
7064
int nextRecordOffset = getRecordOffset(slot) + getTotalSpace(slot);
7065
7066        if (delta > 0) {
7067            // Growing - hopefully during a RRR restore
7068
expandPage(nextRecordOffset, delta);
7069        } else {
7070            // shrinking, delta is < 0
7071
shrinkPage(nextRecordOffset, -delta);
7072        }
7073
7074        // Lastly, update the reserved space count in the slot.
7075
rawDataOut.setPosition(getSlotOffset(slot) + (2*slotFieldSize));
7076        if (slotFieldSize == SMALL_SLOT_SIZE)
7077            logicalDataOut.writeShort(value);
7078        else
7079            logicalDataOut.writeInt(value);
7080
7081    }
7082
7083
7084    /**
7085        Store a record at the given slot.
7086
7087        @exception StandardException Standard Cloudscape error policy
7088        @exception IOException RESOLVE
7089    */

7090    public void storeRecord(LogInstant instant, int slot, boolean insert, ObjectInput in)
7091        throws StandardException, IOException JavaDoc
7092    {
7093        logAction(instant);
7094
7095        if (insert)
7096            storeRecordForInsert(slot, in);
7097        else
7098            storeRecordForUpdate(slot, in);
7099    }
7100
7101    private void storeRecordForInsert(int slot, ObjectInput in)
7102        throws StandardException, IOException JavaDoc
7103    {
7104
7105        StoredRecordHeader recordHeader = shiftUp(slot);
7106        if (recordHeader == null) {
7107            recordHeader = new StoredRecordHeader();
7108            setHeaderAtSlot(slot, recordHeader);
7109        }
7110
7111        bumpRecordCount(1);
7112
7113        // recordHeader represents the new version of the record header.
7114
recordHeader.read(in);
7115
7116        // the record is already marked delete, we need to bump the deletedRowCount
7117
if (recordHeader.isDeleted()) {
7118            deletedRowCount++;
7119            headerOutOfDate = true;
7120        }
7121
7122        // during a rollforward insert, recordId == nextId
7123
// during a rollback of purge, recordId < nextId
7124
if (nextId <= recordHeader.getId())
7125            nextId = recordHeader.getId()+1;
7126
7127        int recordOffset = firstFreeByte;
7128        int offset = recordOffset;
7129
7130        // write each field out to the page
7131
int numberFields = recordHeader.getNumberFields();
7132
7133        rawDataOut.setPosition(offset);
7134        offset += recordHeader.write(rawDataOut);
7135
7136        int userData = 0;
7137        for (int i = 0; i < numberFields; i++) {
7138
7139            // get the field header information, the input stream came from the log
7140
int newFieldStatus = StoredFieldHeader.readStatus(in);
7141            int newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);
7142            newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);
7143
7144            rawDataOut.setPosition(offset);
7145            offset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);
7146
7147            if (newFieldDataLength != 0) {
7148                in.readFully(pageData, offset, newFieldDataLength);
7149                offset += newFieldDataLength;
7150                userData += newFieldDataLength;
7151            }
7152        }
7153
7154        int dataWritten = offset - firstFreeByte;
7155
7156        freeSpace -= dataWritten;
7157        firstFreeByte += dataWritten;
7158
7159        int reservedSpace = 0;
7160        if (minimumRecordSize > 0) {
7161
7162            // make sure we reserve the minimumRecordSize for the user data
7163
// portion of the record excluding the space we took on recordHeader
7164
// and fieldHeaders.
7165
if (userData < minimumRecordSize) {
7166                reservedSpace = minimumRecordSize - userData;
7167                freeSpace -= reservedSpace;
7168                firstFreeByte += reservedSpace;
7169            }
7170        }
7171
7172        // update the slot table
7173
addSlotEntry(slot, recordOffset, dataWritten, reservedSpace);
7174
7175        if (SanityManager.DEBUG)
7176        {
7177            if ((firstFreeByte > getSlotOffset(slot)) ||
7178                (freeSpace < 0))
7179            {
7180                SanityManager.THROWASSERT(
7181                        " firstFreeByte = " + firstFreeByte +
7182                        " dataWritten = " + dataWritten +
7183                        " getSlotOffset(slot) = " + getSlotOffset(slot) +
7184                        " slot = " + slot +
7185                        " firstFreeByte = " + firstFreeByte +
7186                        " freeSpace = " + freeSpace +
7187                        " page = " + this);
7188            }
7189        }
7190
7191        if ((firstFreeByte > getSlotOffset(slot)) || (freeSpace < 0))
7192        {
7193            throw dataFactory.markCorrupt(
7194                StandardException.newException(
7195                    SQLState.DATA_CORRUPT_PAGE, getPageId()));
7196        }
7197
7198    }
7199
7200
7201    private void storeRecordForUpdate(int slot, ObjectInput in)
7202        throws StandardException, IOException JavaDoc
7203    {
7204        // set up to read the in-memory record header back from the record
7205
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
7206        StoredRecordHeader newRecorderHeader = new StoredRecordHeader();
7207
7208        // recordHeader represents the new version of the record header.
7209
newRecorderHeader.read(in);
7210
7211        int oldFieldCount = recordHeader.getNumberFields();
7212        int newFieldCount = newRecorderHeader.getNumberFields();
7213
7214        int startField = recordHeader.getFirstField();
7215        if (SanityManager.DEBUG) {
7216            if (startField != newRecorderHeader.getFirstField())
7217                SanityManager.THROWASSERT("First field changed from " + startField + " to " + newRecorderHeader.getFirstField());
7218        }
7219
7220        // See if the number of fields shrunk, if so clear out the old data
7221
// we do this first to stop shuffling about the fields that are going to
7222
// be deleted during the update of the earlier fields. This case occurs
7223
// on an update that changes the row to be overflowed.
7224
if (newFieldCount < oldFieldCount) {
7225
7226            int oldDataStartingOffset = getFieldOffset(slot, startField + newFieldCount);
7227
7228            // calculate the length of the to be deleted fields
7229
int deleteLength = getRecordOffset(slot) + getRecordPortionLength(slot) - oldDataStartingOffset;
7230
7231            // we are updateing to zero bytes!
7232
updateRecordPortionLength(slot, -(deleteLength), deleteLength);
7233        }
7234
7235        // write each field out to the page
7236

7237        int startingOffset = getRecordOffset(slot);
7238        int newOffset = startingOffset;
7239        int oldOffset = startingOffset;
7240
7241        // see which field we get to use the reserve space
7242
int reservedSpaceFieldId = newFieldCount < oldFieldCount ?
7243            newFieldCount - 1 : oldFieldCount - 1;
7244        reservedSpaceFieldId += startField;
7245
7246
7247        // the new data the needs to be written at newOffset but can't until
7248
// unsedSpace >= newDataToWrite.length (allowing for the header)
7249
DynamicByteArrayOutputStream newDataToWrite = null;
7250
7251        rawDataOut.setPosition(newOffset);
7252
7253        // write the record header, which may change in size
7254
int oldLength = recordHeader.size();
7255        int newLength = newRecorderHeader.size();
7256
7257        int unusedSpace = oldLength; // the unused space at newOffset
7258

7259        // no fields, so we can eat into the reserve space
7260
if (reservedSpaceFieldId < startField) // no fields
7261
unusedSpace += getReservedCount(slot);
7262
7263        if (unusedSpace >= newLength) {
7264            newRecorderHeader.write(rawDataOut);
7265            newOffset += newLength;
7266            unusedSpace -= newLength;
7267            
7268        } else {
7269
7270            newDataToWrite = new DynamicByteArrayOutputStream(getPageSize());
7271            newRecorderHeader.write(newDataToWrite);
7272        }
7273        oldOffset += oldLength;
7274        int recordDelta = (newLength - oldLength);
7275
7276        int oldFieldStatus = 0;
7277        int oldFieldDataLength = 0;
7278        int newFieldStatus = 0;
7279        int newFieldDataLength = 0;
7280
7281        int oldEndFieldExclusive = startField + oldFieldCount;
7282        int newEndFieldExclusive = startField + newFieldCount;
7283
7284        for (int fieldId = startField; fieldId < newEndFieldExclusive; fieldId++) {
7285
7286            int oldFieldLength = 0;
7287            if (fieldId < oldEndFieldExclusive) {
7288                rawDataIn.setPosition(oldOffset);
7289                oldFieldStatus = StoredFieldHeader.readStatus(rawDataIn);
7290                oldFieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, oldFieldStatus, slotFieldSize);
7291                oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize)
7292                    + oldFieldDataLength;
7293            }
7294
7295            newFieldStatus = StoredFieldHeader.readStatus(in);
7296            newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);
7297
7298            // if no value was provided on an update of a field then use the old value,
7299
// unless the old field didn't exist.
7300
if (StoredFieldHeader.isNonexistent(newFieldStatus) && (fieldId < oldEndFieldExclusive)) {
7301
7302                // may need to move this old field ...
7303
if ((newDataToWrite == null) || (newDataToWrite.getUsed() == 0)) {
7304                    // the is no old data to catch up on, is the data at
7305
// the correct position already?
7306
if (newOffset == oldOffset) {
7307                        // yes, nothing to do!!
7308
if (SanityManager.DEBUG) {
7309                            if (unusedSpace != 0)
7310                            SanityManager.THROWASSERT("Unused space is out of sync, expect 0 got " + unusedSpace);
7311                        }
7312                    } else {
7313                        // need to shift the field left
7314
if (SanityManager.DEBUG) {
7315                            if (unusedSpace != (oldOffset - newOffset))
7316                            SanityManager.THROWASSERT(
7317                                "Unused space is out of sync expected " + (oldOffset - newOffset) + " got " + unusedSpace);
7318                        }
7319
7320                        System.arraycopy(pageData, oldOffset, pageData, newOffset, oldFieldLength);
7321                    }
7322                    newOffset += oldFieldLength;
7323
7324                    // last field to be updated can eat into the reserve space
7325
if (fieldId == reservedSpaceFieldId)
7326                        unusedSpace += getReservedCount(slot);
7327
7328                } else {
7329                    // there is data still to be written, just append this field to the
7330
// saved data
7331
int position = newDataToWrite.getPosition();
7332                    newDataToWrite.setPosition(position + oldFieldLength);
7333                    System.arraycopy(pageData, oldOffset,
7334                        newDataToWrite.getByteArray(), position, oldFieldLength);
7335
7336                    unusedSpace += oldFieldLength;
7337
7338                    // last field to be updated can eat into the reserve space
7339
if (fieldId == reservedSpaceFieldId)
7340                        unusedSpace += getReservedCount(slot);
7341
7342                    // attempt to write out some of what we have in the side buffer now.
7343
int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
7344                    newOffset += copyLength;
7345                    unusedSpace -= copyLength;
7346
7347                }
7348                oldOffset += oldFieldLength;
7349                continue;
7350            }
7351
7352            newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);
7353
7354            int newFieldHeaderLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize);
7355            int newFieldLength = newFieldHeaderLength + newFieldDataLength;
7356
7357            recordDelta += (newFieldLength - oldFieldLength);
7358
7359            // See if we can write this field now
7360

7361            // space available increases by the amount of the old field
7362
unusedSpace += oldFieldLength;
7363            oldOffset += oldFieldLength;
7364
7365            // last field to be updated can eat into the reserve space
7366
if (fieldId == reservedSpaceFieldId)
7367                unusedSpace += getReservedCount(slot);
7368
7369            if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {
7370
7371                // catch up on the old data if possible
7372
int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
7373                newOffset += copyLength;
7374                unusedSpace -= copyLength;
7375            }
7376
7377            if (((newDataToWrite == null) || (newDataToWrite.getUsed() == 0))
7378                && (unusedSpace >= newFieldHeaderLength)) {
7379
7380                // can fit the header in
7381
rawDataOut.setPosition(newOffset);
7382                newOffset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);
7383                unusedSpace -= newFieldHeaderLength;
7384
7385                if (newFieldDataLength != 0) {
7386
7387                    // read as much as the field as possible
7388
int fieldCopy = unusedSpace >= newFieldDataLength ?
7389                            newFieldDataLength : unusedSpace;
7390
7391                    if (fieldCopy != 0) {
7392                        in.readFully(pageData, newOffset, fieldCopy);
7393
7394                        newOffset += fieldCopy;
7395                        unusedSpace -= fieldCopy;
7396                    }
7397
7398
7399                    fieldCopy = newFieldDataLength - fieldCopy;
7400                    if (fieldCopy != 0) {
7401                        if (newDataToWrite == null)
7402                            newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);
7403
7404                        // append the remaining portion of the field to the saved data
7405
int position = newDataToWrite.getPosition();
7406                        newDataToWrite.setPosition(position + fieldCopy);
7407                        in.readFully(newDataToWrite.getByteArray(),
7408                                position, fieldCopy);
7409
7410                    }
7411                }
7412            } else {
7413                // can't fit these header, or therefore the field, append it
7414
// to the buffer.
7415

7416                if (newDataToWrite == null)
7417                    newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);
7418
7419                StoredFieldHeader.write(newDataToWrite, newFieldStatus, newFieldDataLength, slotFieldSize);
7420
7421                // save the new field data
7422
if (newFieldDataLength != 0) {
7423                    int position = newDataToWrite.getPosition();
7424                    newDataToWrite.setPosition(position + newFieldDataLength);
7425                    in.readFully(newDataToWrite.getByteArray(),
7426                                position, newFieldDataLength);
7427                }
7428            }
7429        }
7430
7431        // at this point there may still be data left in the saved buffer
7432
// but presumably we can't fit it in
7433

7434        int reservedDelta;
7435
7436        if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {
7437
7438            // need to shift the later records down ...
7439
int nextRecordOffset = startingOffset + getTotalSpace(slot);
7440
7441            int spaceRequiredFromFreeSpace = newDataToWrite.getUsed() - (nextRecordOffset - newOffset);
7442
7443            if (SanityManager.DEBUG) {
7444                if (newOffset > nextRecordOffset)
7445                    SanityManager.THROWASSERT("data has overwritten next record - offset " + newOffset
7446                            + " next record " + nextRecordOffset);
7447
7448                if ((spaceRequiredFromFreeSpace <= 0) || (spaceRequiredFromFreeSpace > freeSpace))
7449                    SanityManager.THROWASSERT("invalid space required " + spaceRequiredFromFreeSpace
7450                    + " newDataToWrite.getUsed() " + newDataToWrite.getUsed()
7451                    + " nextRecordOffset " + nextRecordOffset
7452                    + " newOffset " + newOffset
7453                    + " reservedSpaceFieldId " + reservedSpaceFieldId
7454                    + " startField " + startField
7455                    + " newEndFieldExclusive " + newEndFieldExclusive
7456                    + " newFieldCount " + newFieldCount
7457                    + " oldFieldCount " + oldFieldCount
7458                    + " slot " + slot
7459                    + " freeSpace " + freeSpace
7460                    + " unusedSpace " + unusedSpace
7461                    + " page " + getPageId());
7462
7463
7464                if ((getReservedCount(slot) + spaceRequiredFromFreeSpace) != recordDelta)
7465                    SanityManager.THROWASSERT("mismatch on count: reserved " + getReservedCount(slot) +
7466                        "free space take " + spaceRequiredFromFreeSpace +
7467                        "record delta " + recordDelta);
7468
7469            }
7470
7471            if (spaceRequiredFromFreeSpace > freeSpace) {
7472                throw dataFactory.markCorrupt(
7473                    StandardException.newException(
7474                        SQLState.DATA_CORRUPT_PAGE, getPageId()));
7475            }
7476
7477            // see if this is the last record on the page, if so a simple
7478
// shift of the remaining fields will sufice...
7479
expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);
7480
7481            unusedSpace += spaceRequiredFromFreeSpace;
7482
7483            moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
7484
7485            reservedDelta = -1 * getReservedCount(slot);
7486
7487            if (SanityManager.DEBUG) {
7488                if (newDataToWrite.getUsed() != 0)
7489                    SanityManager.THROWASSERT("data is left in save buffer ... " + newDataToWrite.getUsed());
7490            }
7491        } else {
7492            reservedDelta = -1 * recordDelta;
7493        }
7494
7495        // now reset the length in the slot entry
7496
updateRecordPortionLength(slot, recordDelta, reservedDelta);
7497
7498        setHeaderAtSlot(slot, newRecorderHeader);
7499    }
7500
7501    private int moveSavedDataToPage(DynamicByteArrayOutputStream savedData, int unusedSpace, int pageOffset) {
7502        // catch up on the old data if possible
7503
if (unusedSpace > (savedData.getUsed() / 2)) {
7504            // copy onto the page
7505
int copyLength = unusedSpace <= savedData.getUsed() ?
7506                            unusedSpace : savedData.getUsed();
7507            System.arraycopy(savedData.getByteArray(), 0,
7508                pageData, pageOffset, copyLength);
7509
7510            // fix up the saved buffer
7511
savedData.discardLeft(copyLength);
7512
7513            return copyLength;
7514        }
7515
7516        return 0;
7517    }
7518
7519
    /**
        Create the space to update a portion of a record.
        This method ensures there is enough room to replace the
        old data of length oldLength at the given offset, with the new data of
        length newLength. This method does not put any new data on the page; it
        moves old data around and zeros out any old data when
        newLength < oldLength. This method does update the information in the
        slot table.

        The passed in offset is the correct place to put the data
        when this method returns, ie. it only moves data that
        has an offset greater then this.

        @param slot      slot of the record being updated
        @param offset    page offset at which the replacement data will start
        @param oldLength length in bytes of the data being replaced
        @param newLength length in bytes of the replacement data

        @exception StandardException Standard Cloudscape error policy
        @exception IOException RESOLVE
    */
    private void createSpaceForUpdate(int slot, int offset, int oldLength, int newLength)
        throws StandardException, IOException
    {

        // Shrinking (or same-size) update: shift trailing data down and grow
        // the record's reserved space by the number of bytes freed.
        if (newLength <= oldLength) {

            int diffLength = oldLength - newLength;

            // same size - nothing to move
            if (diffLength == 0)
                return;

            // shift the remaining fields down
            int remainingLength =
                shiftRemainingData(slot, offset, oldLength, newLength);

            // clear the now unused data on the page
            clearSection(offset + newLength + remainingLength, diffLength);

            if (SanityManager.DEBUG) {

                if ((getRecordPortionLength(slot) - diffLength) !=
                    ((offset - getRecordOffset(slot)) + newLength +
                      remainingLength))
                {
                    SanityManager.THROWASSERT(
                        " Slot table trying to update record length " +
                        (getRecordPortionLength(slot) - diffLength) +
                        " that is not the same as what it actully is");
                }
            }

            // now reset the length in the slot entry, increase the reserved space
            updateRecordPortionLength(slot, -(diffLength), diffLength);
            return;
        }

        // tough case, the new field is bigger than the old field ...
        // first attempt, see how much space is in row private reserved space

        int extraLength = newLength - oldLength;

        // extraLength is always greater than 0.
        if (SanityManager.DEBUG)
            SanityManager.ASSERT(extraLength > 0);

        int recordReservedSpace = getReservedCount(slot);
        int reservedDelta = 0;

        // bytes needed beyond the row's own reserved space; <= 0 means the
        // reserve alone can absorb the growth
        int spaceRequiredFromFreeSpace = extraLength - recordReservedSpace;

        if (SanityManager.DEBUG) {
            if (spaceRequiredFromFreeSpace > freeSpace)
                SanityManager.THROWASSERT(
                    "spaceRequiredFromFreeSpace = " +
                        spaceRequiredFromFreeSpace +
                    ";freeSpace = " + freeSpace +
                    ";newLength = " + newLength +
                    ";oldLength = " + oldLength +
                    ";\npage= " + this);
        }

        if (spaceRequiredFromFreeSpace > 0) {
            // The update requires all the reserved space + some from free space

            int nextRecordOffset = getRecordOffset(slot) + getTotalSpace(slot);

            // open a gap after this record by shifting all following records;
            // if this is the last record on the page this is a simple shift
            expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);

            // we used all the reserved space we have, set it to 0
            reservedDelta = -(recordReservedSpace);
        } else {
            // the update uses some amount of space from the rows reserved space

            // set reserved Delta to account for amount of reserved space used.
            reservedDelta = -(extraLength);
        }

        // just shift all remaining fields up
        int remainingLength = shiftRemainingData(slot, offset, oldLength, newLength);

        if (SanityManager.DEBUG) {
            if ((extraLength + reservedDelta) < 0)
                SanityManager.THROWASSERT(
                    "total space the record occupies cannot shrink, extraLength = "
                    + extraLength + " reservedDelta = " + reservedDelta
                    + " spacerequired = " + spaceRequiredFromFreeSpace
                    + " recordReservedSpace = " + recordReservedSpace);
        }

        // now reset the length in the slot entry
        updateRecordPortionLength(slot, extraLength, reservedDelta);
    }
7632
    /**
        storeField

        Replace the field at fieldNumber of the record in the given slot with
        the field image (header + data) read from the stream <code>in</code>.

        @exception StandardException Standard Cloudscape error policy
        @exception IOException RESOLVE
    */
    public void storeField(LogInstant instant, int slot, int fieldNumber, ObjectInput in)
        throws StandardException, IOException
    {
        logAction(instant);

        int offset = getFieldOffset(slot, fieldNumber);

        // get the field header information, the input stream came from the log
        ArrayInputStream lrdi = rawDataIn;
        lrdi.setPosition(offset);
        int oldFieldStatus = StoredFieldHeader.readStatus(lrdi);
        int oldFieldDataLength = StoredFieldHeader.readFieldDataLength(lrdi, oldFieldStatus, slotFieldSize);

        int newFieldStatus = StoredFieldHeader.readStatus(in);
        int newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);
        // fields stored on the page are not marked fixed - clear the bit
        newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);

        // total on-page lengths: header size + data size
        int oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize) + oldFieldDataLength;
        int newFieldLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize) + newFieldDataLength;

        // open up (or shrink) the room on the page before writing anything
        createSpaceForUpdate(slot, offset, oldFieldLength, newFieldLength);

        // write the new header, then copy the field data straight in place
        rawDataOut.setPosition(offset);
        offset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);

        if (newFieldDataLength != 0)
            in.readFully(pageData, offset, newFieldDataLength);
    }
7667
    /**
        reserveSpaceForSlot
        This method will reserve at least specified "spaceToReserve" bytes for the record
        in the slot. If the record already has that much reserved, this is a
        no-op.

        @exception StandardException Standard Cloudscape error policy
        @exception IOException RESOLVE
    */
    public void reserveSpaceForSlot(LogInstant instant, int slot, int spaceToReserve)
        throws StandardException, IOException
    {
        logAction(instant);

        // additional bytes needed beyond what is already reserved
        int extraSpace = spaceToReserve - getReservedCount(slot);
        if (extraSpace <= 0)
            return;

        if (freeSpace < extraSpace)
            throw new NoSpaceOnPage(isOverflowPage());

        // need to shift the later records down ...
        int startingOffset = getRecordOffset(slot);
        int nextRecordOffset = startingOffset + getTotalSpace(slot);

        // open a gap right after this record; if this is the last record on
        // the page a simple shift of the remaining space suffices
        expandPage(nextRecordOffset, extraSpace);

        // record offset and data length are unchanged; only the reserve grows
        setSlotEntry(slot, startingOffset, getRecordPortionLength(slot), spaceToReserve);
    }
7698
7699    /**
7700        Skip a field header and its data on the given stream.
7701        
7702        @exception IOException corrupt stream
7703    */

7704    public void skipField(ObjectInput in) throws IOException JavaDoc {
7705
7706
7707        int fieldStatus = StoredFieldHeader.readStatus(in);
7708        int fieldDataLength = StoredFieldHeader.readFieldDataLength(in, fieldStatus, slotFieldSize);
7709
7710        if (fieldDataLength != 0) {
7711            in.skipBytes(fieldDataLength);
7712        }
7713    }
7714
7715    public void skipRecord(ObjectInput in) throws IOException JavaDoc
7716    {
7717
7718        StoredRecordHeader recordHeader = new StoredRecordHeader();
7719        recordHeader.read(in);
7720
7721        for (int i = recordHeader.getNumberFields(); i > 0; i--) {
7722            skipField(in);
7723        }
7724    }
7725
    /**
        Shift data within a record to account for an update.

        @param slot slot of the record being updated
        @param offset Offset where the update starts, need not be on a field boundry.
        @param oldLength length of the data being replaced
        @param newLength length of the data replacing the old data

        @return the length of the data in the record after the replaced data.
    */
    private int shiftRemainingData(int slot, int offset, int oldLength, int newLength)
        throws IOException
    {

        // length of valid data remaining in the record after the portion that
        // is being replaced.
        int remainingLength = (getRecordOffset(slot) + getRecordPortionLength(slot)) -
                                            (offset + oldLength);

        if (SanityManager.DEBUG) {

            if (!(((remainingLength >= 0) &&
                   (getRecordPortionLength(slot) >= oldLength))))
            {
                SanityManager.THROWASSERT(
                    "oldLength = " + oldLength + " newLength = " + newLength +
                    "remainingLength = " + remainingLength +
                    " offset = " + offset +
                    " getRecordOffset(" + slot + ") = " + getRecordOffset(slot)+
                    " getRecordPortionLength(" + slot + ") = " +
                        getRecordPortionLength(slot));
            }
        }

        // slide the tail of the record so it starts where the new data will
        // end; System.arraycopy copes with the overlapping source/dest ranges
        if (remainingLength != 0) {
            System.arraycopy(pageData, offset + oldLength,
                             pageData, offset + newLength, remainingLength);
        }

        return remainingLength;

    }
7767
    /**
        Set the deleted status

        @exception StandardException Standard Cloudscape error policy
        @exception IOException RESOLVE
        @see BasePage#setDeleteStatus
    */
    public void setDeleteStatus(LogInstant instant, int slot, boolean delete)
        throws StandardException, IOException
    {

        logAction(instant);

        // accumulate the change in the deleted-row count reported by the
        // base class, and note that the cached page header needs rewriting
        deletedRowCount += super.setDeleteStatus(slot, delete);
        headerOutOfDate = true;

        int offset = getRecordOffset(slot);
        StoredRecordHeader recordHeader = getHeaderAtSlot(slot);

        // rewrite the record header in place so the on-page image reflects
        // the new delete status
        rawDataOut.setPosition(offset);
        recordHeader.write(logicalDataOut);
    }
7790
    /**
        get record count without checking for latch
    */
    protected int internalDeletedRecordCount()
    {
        // cached count, maintained by setDeleteStatus and purgeRecord
        return deletedRowCount;
    }
7798
    /**
        purgeRecord from page. Move following slots up by one.

        @exception StandardException Standard Cloudscape error policy
        @exception IOException RESOLVE
    */
    public void purgeRecord(LogInstant instant, int slot, int recordId)
        throws StandardException, IOException
    {

        logAction(instant);

        // if record is marked deleted, reduce deletedRowCount
        if (getHeaderAtSlot(slot).isDeleted())
            deletedRowCount--;

        // reclaim the record's full extent (data portion + reserved space)
        int startByte = getRecordOffset(slot);
        int endByte = startByte + getTotalSpace(slot) - 1;

        compressPage(startByte, endByte);

        // fix up the on-page slot table
        removeSlotEntry(slot);

        // fix up the in-memory version
        removeAndShiftDown(slot);
    }
7826
7827    /*
7828    **
7829    */

7830
    /**
        Get the offset of the field header of the given field for
        the record in the given slot.

        Field number is the absolute number for the complete record, not just this portion.
        E.g. if this is a record portion that starts at field 3 and has 6 fields
        then the second field on this *page* has field number 4.
    */
    private int getFieldOffset(int slot, int fieldNumber) throws IOException
    {
        // RESOLVE - overflow, needs to be changed
        int offset = getRecordOffset(slot);

        StoredRecordHeader recordHeader = getHeaderAtSlot(slot);

        // first (absolute) field number present in this record portion
        int startField = recordHeader.getFirstField();

        if (SanityManager.DEBUG) {
            int numberFields = recordHeader.getNumberFields();

            if ((fieldNumber < startField) || (fieldNumber >= (startField + numberFields)))
                SanityManager.THROWASSERT(
                    "fieldNumber: " + fieldNumber +
                    " start field: " + startField +
                    " number of fields " + numberFields);
        }

        ArrayInputStream lrdi = rawDataIn;

        // skip the record header
        lrdi.setPosition(offset + recordHeader.size());

        // skip any earlier fields ...
        for (int i = startField; i < fieldNumber; i++) {
            skipField(lrdi);
        }

        // stream is now positioned at the requested field's header
        return rawDataIn.getPosition();
    }
7871
7872
7873    /*
7874     * Time stamp support - this page supports time stamp
7875     */

7876
7877    /**
7878        Get a time stamp for this page
7879        @return page time stamp
7880    */

7881    public PageTimeStamp currentTimeStamp()
7882    {
7883        // saving the whole key would be an overkill
7884
return new PageVersion(getPageNumber(), getPageVersion());
7885    }
7886
7887    /**
7888        Set given pageVersion to be the as what is on this page
7889      
7890        @exception StandardException given time stamp is null or is not a time
7891        stamp implementation this page knows how to deal with
7892    */

7893    public void setTimeStamp(PageTimeStamp ts) throws StandardException
7894    {
7895        if (ts == null)
7896        {
7897            throw StandardException.newException(SQLState.DATA_TIME_STAMP_NULL);
7898        }
7899
7900        if (!(ts instanceof PageVersion))
7901        {
7902            throw StandardException.newException(
7903                SQLState.DATA_TIME_STAMP_ILLEGAL, ts);
7904        }
7905
7906        PageVersion pv = (PageVersion)ts;
7907
7908        pv.setPageNumber(getPageNumber());
7909        pv.setPageVersion(getPageVersion());
7910    }
7911
7912    /**
7913        compare given PageVersion with pageVersion on page
7914
7915        @param ts the page version gotton from this page via a currentTimeStamp
7916                or setTimeStamp call earlier
7917
7918        @return true if the same
7919        @exception StandardException given time stamp not gotton from this page
7920    */

7921    public boolean equalTimeStamp(PageTimeStamp ts) throws StandardException
7922    {
7923        if (ts == null)
7924            return false;
7925
7926        if (!(ts instanceof PageVersion))
7927        {
7928            throw StandardException.newException(
7929                SQLState.DATA_TIME_STAMP_ILLEGAL, ts);
7930        }
7931
7932        PageVersion pv = (PageVersion)ts;
7933
7934        if (pv.getPageNumber() != getPageNumber())
7935        {
7936            throw StandardException.newException(
7937                SQLState.DATA_TIME_STAMP_ILLEGAL, ts);
7938        }
7939
7940        return (pv.getPageVersion() == getPageVersion());
7941    }
7942
7943    /** debugging, print this page */
7944    public String JavaDoc toString()
7945    {
7946        if (SanityManager.DEBUG)
7947        {
7948            if (SanityManager.DEBUG_ON("DeadlockTrace") || SanityManager.DEBUG_ON("userLockStackTrace"))
7949                return "page = " + getIdentity();
7950
7951            String JavaDoc str = "---------------------------------------------------\n";
7952            str += pageHeaderToString();
7953            // str += slotTableToString(); // print in memory slot table
7954

7955            // now print each row
7956
for (int s = 0; s < slotsInUse; s++)
7957                str += recordToString(s);
7958        
7959            //if (SanityManager.DEBUG_ON("dumpPageImage"))
7960
{
7961                str += "---------------------------------------------------\n";
7962                str += pagedataToHexDump(pageData);
7963                str += "---------------------------------------------------\n";
7964            }
7965            return str;
7966        }
7967        else
7968            return null;
7969    }
7970
    /**
     * Provide a hex dump of the data in the in memory version of the page.
     * <p>
     * The output looks like:
     *
     * 00000000: 4d5a 9000 0300 0000 0400 0000 ffff 0000 MZ..............
     * 00000010: b800 0000 0000 0000 4000 0000 0000 0000 ........@.......
     * 00000020: 0000 0000 0000 0000 0000 0000 0000 0000 ................
     * 00000030: 0000 0000 0000 0000 0000 0000 8000 0000 ................
     * 00000040: 0e1f ba0e 00b4 09cd 21b8 014c cd21 5468 ........!..L.!Th
     * 00000050: 6973 2070 726f 6772 616d 2063 616e 6e6f is program canno
     * 00000060: 7420 6265 2072 756e 2069 6e20 444f 5320 t be run in DOS
     * 00000070: 6d6f 6465 2e0d 0a24 0000 0000 0000 0050 mode...$.......P
     * 00000080: 4500 004c 0109 008b abfd 3000 0000 0000 E..L......0.....
     * 00000090: 0000 00e0 000e 210b 0102 3700 3405 0000 ......!...7.4...
     * 000000a0: 8401 0000 6400 0000 6004 0000 1000 0000 ....d...`.......
     * 000000b0: 5005 0000 0008 6000 1000 0000 0200 0001 P.....`.........
     * 000000c0: 0000 0000 0000 0004 0000 0000 0000 0000 ................
     * 000000d0: 9007 0000 0400 0009 a207 0002 0000 0000 ................
     * 000000e0: 0010 0000 1000 0000 0010 0000 1000 0000 ................
     * 000000f0: 0000 0010 0000 0000 6006 00ef 8100 0000 ........`.......
     * 00000100: 5006 00e6 0c00 0000 0007 00d0 0400 0000 P...............
     * 00000110: 0000 0000 0000 0000 0000 0000 0000 0000 ................
     * 00000120: 1007 00c8 7100 0000 0000 0000 0000 0000 ....q...........
     * 00000130: 0000 0000 0000 0000 0000 0000 0000 0000 ................
     *
     * <p>
     * RESOLVE - this has been hacked together and is not efficient. There
     * are probably some java utilities to use.
     *
     * @return The string with the hex dump in it.
     *
     * @param data array of bytes to dump.
     **/
    private static String pagedataToHexDump(byte[] data)
    {
        // simple delegation to the shared hex-dump utility
        return org.apache.derby.iapi.util.StringUtil.hexDump(data);
    }
8009
8010    private String JavaDoc pageHeaderToString()
8011    {
8012        if (SanityManager.DEBUG) {
8013            return "page id " + getIdentity() +
8014                " Overflow: " + isOverflowPage +
8015                " PageVersion: " + getPageVersion() +
8016                " SlotsInUse: " + slotsInUse +
8017                " DeletedRowCount: " + deletedRowCount +
8018                " PageStatus: " + getPageStatus() +
8019                " NextId: " + nextId +
8020                " firstFreeByte: " + firstFreeByte +
8021                " freeSpace: " + freeSpace +
8022                " totalSpace: " + totalSpace +
8023                " spareSpace: " + spareSpace +
8024                " PageSize: " + getPageSize() +
8025                "\n";
8026        }
8027        else
8028            return null;
8029    }
8030
8031    private String JavaDoc recordToString(int slot)
8032    {
8033        if (SanityManager.DEBUG)
8034        {
8035            String JavaDoc str = new String JavaDoc();
8036            try
8037            {
8038                StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
8039                int offset = getRecordOffset(slot);
8040                int numberFields = recordHeader.getNumberFields();
8041                str = "\nslot " + slot + " offset " + offset + " " +
8042                         " recordlen " + getTotalSpace(slot) +
8043                         " (" + getRecordPortionLength(slot) +
8044                         "," + getReservedCount(slot) + ")"+
8045                         recordHeader.toString();
8046
8047                rawDataIn.setPosition(offset + recordHeader.size());
8048
8049                for (int i = 0; i < numberFields; i++)
8050                {
8051                    int fieldStatus = StoredFieldHeader.readStatus(rawDataIn);
8052                    int fieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, fieldStatus, slotFieldSize);
8053                    if (fieldDataLength < 0)
8054                    {
8055                        str += "\n\tField " + i + ": offset=" + offset + " null " +
8056                            StoredFieldHeader.toDebugString(fieldStatus);
8057                    }
8058                    else
8059                    {
8060                        str += "\n\tField " + i + ": offset=" + offset +
8061                            " len=" + fieldDataLength + " " +
8062                            StoredFieldHeader.toDebugString(fieldStatus);
8063
8064                        if (StoredFieldHeader.isOverflow(fieldStatus))
8065                        {
8066                            // not likely to be a real pointer, this is most
8067
// likely an old column chain where the first field
8068
// is set to overflow even though the second field
8069
// is the overflow pointer
8070
if (i == 0 && fieldDataLength != 3)
8071                            {
8072                                // figure out where we should go next
8073
offset = rawDataIn.getPosition() + fieldDataLength;
8074                                long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);
8075                                int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);
8076
8077                                str += "Questionable long column at (" +
8078                                    overflowPage + "," + overflowId + ")";
8079                                rawDataIn.setPosition(offset);
8080                            }
8081                            else
8082                            {
8083                                // print the overflow pointer
8084
long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);
8085                                int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);
8086                                str += "long column at (" + overflowPage + "," + overflowId + ")";
8087                            }
8088                        }
8089                        else
8090                        {
8091                            // go to next field
8092
offset = rawDataIn.getPosition() + fieldDataLength;
8093                            rawDataIn.setPosition(offset);
8094                        }
8095                    }
8096                }
8097                str += "\n";
8098
8099            }
8100            catch (IOException JavaDoc ioe)
8101            {
8102                str += "\n ======= ERROR IOException =============\n";
8103                str += ioe.toString();
8104            }
8105            catch (StandardException se)
8106            {
8107                str += "\n ======= ERROR StandardException =============\n";
8108                str += se.toString();
8109            }
8110
8111            return str;
8112        }
8113        else
8114            return null;
8115    }
8116
8117    /*
8118    ** Overflow related methods
8119    */

8120
8121    /**
8122        Get the overflow page for a record that has already overflowed.
8123        @exception StandardException Standard Cloudscape error policy
8124    */

8125    protected StoredPage getOverflowPage(long pageNumber) throws StandardException
8126    {
8127
8128        StoredPage overflowPage = (StoredPage) owner.getPage(pageNumber);
8129        if (overflowPage == null) {
8130        }
8131
8132        // RESOLVE-LR
8133
//if (!overflowPage.isOverflow()) {
8134
// overflowPage.unlatch();
8135
//}
8136

8137        return overflowPage;
8138    }
8139
8140    /**
8141        Get an empty overflow page.
8142        @exception StandardException Standard Cloudscape error policy
8143    */

8144    protected BasePage getNewOverflowPage() throws StandardException
8145    {
8146
8147        FileContainer myContainer = (FileContainer) containerCache.find(identity.getContainerId());
8148
8149        try {
8150            // add an overflow page
8151
return (BasePage) myContainer.addPage(owner, true);
8152        } finally {
8153            containerCache.release(myContainer);
8154        }
8155    }
8156
8157    /**
8158        Get the overflow slot for a record that has already overflowed.
8159        @exception StandardException Standard Cloudscape error policy
8160    */

8161    protected static int getOverflowSlot(BasePage overflowPage, StoredRecordHeader recordHeader)
8162        throws StandardException
8163    {
8164
8165        int slot = overflowPage.findRecordById(
8166                        recordHeader.getOverflowId(), Page.FIRST_SLOT_NUMBER);
8167
8168        if (slot < 0)
8169        {
8170            throw StandardException.newException(
8171                    SQLState.DATA_SLOT_NOT_ON_PAGE);
8172        }
8173
8174        return slot;
8175    }
8176
8177    /**
8178        Get a overflow page that potentially can handle a new overflowed record.
8179        @exception StandardException Standard Cloudscape error policy
8180    */

8181    public BasePage getOverflowPageForInsert(
8182    int currentSlot,
8183    Object JavaDoc[] row,
8184    FormatableBitSet validColumns)
8185        throws StandardException
8186    {
8187        return getOverflowPageForInsert(currentSlot, row, validColumns, 0);
8188    }
8189
8190    /**
8191        @exception StandardException Standard Cloudscape error policy
8192    */

8193    public BasePage getOverflowPageForInsert(
8194    int currentSlot,
8195    Object JavaDoc[] row,
8196    FormatableBitSet validColumns,
8197    int startColumn)
8198        throws StandardException
8199    {
8200        // System.out.println("Top of getOverflowPageForInsert");
8201

8202        // look at all the overflow pages that are in use on this page, up
8203
// to a maximum of 5.
8204
long[] pageList = new long[5];
8205        int pageCount = 0;
8206
8207        long currentOverflowPageNumber = 0;
8208
8209slotScan:
8210        for (int slot = 0; (slot < slotsInUse) && (pageCount < pageList.length); slot++) {
8211
8212            StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
8213            if (!recordHeader.hasOverflow())
8214                continue;
8215
8216            long overflowPageNumber = recordHeader.getOverflowPage();
8217
8218            if (slot == currentSlot) {
8219                currentOverflowPageNumber = overflowPageNumber;
8220                continue;
8221            }
8222
8223            for (int i = 0; i < pageCount; i++) {
8224                if (pageList[i] == overflowPageNumber)
8225                    continue slotScan;
8226            }
8227
8228            pageList[pageCount++] = overflowPageNumber;
8229        }
8230
8231
8232        for (int i = 0; i < pageCount; i++) {
8233
8234            long pageNumber = pageList[i];
8235
8236            // don't look at the current overflow page
8237
// used by this slot, because it the record is already
8238
// overflowed then we reached here because the overflow
8239
// page is full.
8240
if (pageNumber == currentOverflowPageNumber)
8241                continue;
8242            StoredPage overflowPage = null;
8243            int spaceNeeded = 0;
8244            try {
8245                overflowPage = getOverflowPage(pageNumber);
8246                if ( overflowPage.spaceForInsert(row, validColumns,
8247                    spaceNeeded, startColumn, 100))
8248                {
8249                    // System.out.println("returning used page: " + pageNumber);
8250
return overflowPage;
8251                }
8252
8253                spaceNeeded = ((StoredPage) overflowPage).getCurrentFreeSpace();
8254                overflowPage.unlatch();
8255                overflowPage = null;
8256                
8257            } catch (StandardException se) {
8258                if (overflowPage != null) {
8259                    overflowPage.unlatch();
8260                    overflowPage = null;
8261                }
8262
8263            }
8264        }
8265
8266        // if we get here then we have to allocate a new overflow page
8267
// System.out.println("returning new page: ");
8268
return getNewOverflowPage();
8269    }
8270    
8271    /**
8272        Update an already overflowed record.
8273
8274        @param slot Slot of the original record on its original page
8275        @param row new version of the data
8276
8277        @exception StandardException Standard Cloudscape error policy
8278    */

8279    protected void updateOverflowed(
8280    RawTransaction t,
8281    int slot,
8282    Object JavaDoc[] row,
8283    FormatableBitSet validColumns,
8284    StoredRecordHeader recordHeader)
8285        throws StandardException
8286    {
8287
8288        BasePage overflowPage = getOverflowPage(recordHeader.getOverflowPage());
8289
8290        try {
8291
8292            int overflowSlot = getOverflowSlot(overflowPage, recordHeader);
8293
8294            overflowPage.doUpdateAtSlot(t, overflowSlot, recordHeader.getOverflowId(), row, validColumns);
8295            overflowPage.unlatch();
8296            overflowPage = null;
8297
8298            return;
8299
8300        } finally {
8301            if (overflowPage != null) {
8302                overflowPage.unlatch();
8303                overflowPage = null;
8304            }
8305        }
8306    }
8307
8308
8309    /**
8310        Update a record handle to point to an overflowed record portion.
8311        Note that the record handle need not be the current page.
8312        @exception StandardException Standard Cloudscape error policy
8313    */

8314    public void updateOverflowDetails(RecordHandle handle, RecordHandle overflowHandle)
8315        throws StandardException
8316    {
8317        long handlePageNumber = handle.getPageNumber();
8318        if (handlePageNumber == getPageNumber()) {
8319            updateOverflowDetails(this, handle, overflowHandle);
8320            return;
8321        }
8322        
8323        StoredPage handlePage = (StoredPage) owner.getPage(handlePageNumber);
8324
8325        updateOverflowDetails(handlePage, handle, overflowHandle);
8326        handlePage.unlatch();
8327    }
8328
8329    private void updateOverflowDetails(StoredPage handlePage, RecordHandle handle, RecordHandle overflowHandle)
8330        throws StandardException {
8331        // update the temp record header, this will be used in the log row ..
8332
handlePage.getOverFlowRecordHeader().setOverflowDetails(overflowHandle);
8333
8334        // Use the slot interface as we don't need a lock since
8335
// the initial insert/update holds the lock on the first
8336
// portion of the record.
8337
int slot = handlePage.getSlotNumber(handle);
8338
8339        // use doUpdateAtSlot as it avoids unnecessary work in updateAtSlot the
8340
// null indicates to this page that the record should become an
8341
// overflow record
8342
handlePage.doUpdateAtSlot(
8343            owner.getTransaction(), slot, handle.getId(),
8344            (Object JavaDoc[]) null, (FormatableBitSet) null);
8345    }
8346
8347    /**
8348        @exception StandardException Standard Cloudscape error policy
8349    */

8350    public void updateFieldOverflowDetails(RecordHandle handle, RecordHandle overflowHandle)
8351        throws StandardException
8352    {
8353        // add an overflow field at the end of the previous record
8354
// uses sparse rows
8355
Object JavaDoc[] row = new Object JavaDoc[2];
8356        row[1] = overflowHandle;
8357
8358        // we are expanding the record to have 2 fields, the second field is the overflow pointer.
8359
FormatableBitSet validColumns = new FormatableBitSet(2);
8360        validColumns.set(1);
8361
8362        // Use the slot interface as we don't need a lock since
8363
// the initial insert/update holds the lock on the first
8364
// portion of the record.
8365
int slot = getSlotNumber(handle);
8366
8367        // use doUpdateAtSlot as it avoids unnecessary work in updateAtSlot
8368
doUpdateAtSlot(owner.getTransaction(), slot, handle.getId(), row, validColumns);
8369    }
8370
8371    /**
8372        @exception StandardException Standard Cloudscape error policy
8373    */

8374    public int appendOverflowFieldHeader(DynamicByteArrayOutputStream logBuffer, RecordHandle overflowHandle)
8375        throws StandardException, IOException JavaDoc
8376    {
8377        int fieldStatus = StoredFieldHeader.setInitial();
8378        fieldStatus = StoredFieldHeader.setOverflow(fieldStatus, true);
8379
8380        long overflowPage = overflowHandle.getPageNumber();
8381        int overflowId = overflowHandle.getId();
8382        int fieldDataLength = CompressedNumber.sizeLong(overflowPage)
8383            + CompressedNumber.sizeInt(overflowId);
8384
8385        // write the field header to the log buffer
8386
int lenWritten = StoredFieldHeader.write(logBuffer, fieldStatus, fieldDataLength, slotFieldSize);
8387
8388        // write the overflow details to the log buffer
8389
lenWritten += CompressedNumber.writeLong(logBuffer, overflowPage);
8390        lenWritten += CompressedNumber.writeInt(logBuffer, overflowId);
8391
8392        // this length is the same on page as in the log
8393
return (lenWritten);
8394    }
8395
8396    protected int getSlotsInUse()
8397    {
8398        return(slotsInUse);
8399    }
8400
8401
8402    /**
8403        return the max datalength allowed with the space available
8404    */

8405    private int getMaxDataLength(int spaceAvailable, int overflowThreshold) {
8406
8407        if (SanityManager.DEBUG) {
8408            if (overflowThreshold == 0)
8409                SanityManager.THROWASSERT("overflowThreshold cannot be 0");
8410        }
8411
8412        // we need to take into considering of the overflowThreshold
8413
// the overflowThreshold limits the max data length,
8414
// whatever space we have left, we will not allow max data length
8415
// to exceed the overflow threshold.
8416
int maxThresholdSpace = totalSpace * overflowThreshold / 100;
8417        int maxAvailable = 0;
8418
8419        if (spaceAvailable < (64 - 2))
8420            maxAvailable = spaceAvailable - 2;
8421        else if (spaceAvailable < (16383 - 3))
8422            maxAvailable = spaceAvailable - 3;
8423        else
8424            maxAvailable = spaceAvailable - 5;
8425
8426        return (maxAvailable > maxThresholdSpace ? maxThresholdSpace : maxAvailable);
8427
8428    }
8429
8430    /**
8431        return whether the field has exceeded the max threshold for this page
8432        it compares the fieldSize with the largest possible field for this page
8433    */

8434    private boolean isLong(int fieldSize, int overflowThreshold) {
8435
8436        if (SanityManager.DEBUG) {
8437            if (overflowThreshold == 0)
8438                SanityManager.THROWASSERT("overflowThreshold cannot be 0");
8439        }
8440
8441        // if a field size is over the threshold, then it becomes a long column
8442
int maxThresholdSize = maxFieldSize * overflowThreshold / 100;
8443        return (fieldSize > maxThresholdSize);
8444    }
8445
    /**
        Perform an update of a record, walking the whole overflow row chain
        and splitting row pieces onto new overflow pages as needed.

        @param t            transaction performing the update
        @param slot         slot of the record on this page
        @param id           record id of the row piece
        @param row          new version of the data; a null row tells the
                            page the record should become an overflow record
        @param validColumns bit set of the columns present in row, may be null

        @exception StandardException Standard cloudscape policy
    */
    public void doUpdateAtSlot(
    RawTransaction t,
    int slot,
    int id,
    Object[] row,
    FormatableBitSet validColumns)
        throws StandardException
    {
        // If this is a head page, the recordHandle is the head row handle.
        // If this is not a head page, we are calling updateAtSlot inside
        // some convoluted loop that updates an overflow chain; there is
        // nothing we can do about it anyway, so the handle is null.
        RecordHandle headRowHandle =
            isOverflowPage() ? null : getRecordHandleAtSlot(slot);

        // RESOLVE: djd/yyz what does a null row means? (sku)
        // A null row is passed straight to actionUpdate (used elsewhere to
        // turn the record into an overflow record).
        if (row == null)
        {
            owner.getActionSet().actionUpdate(
                t, this, slot, id, row, validColumns, -1,
                (DynamicByteArrayOutputStream) null, -1, headRowHandle);

            return;
        }

        // startColumn is the first column to be updated; -1 means nothing
        // to update at all.
        int startColumn = RowUtil.nextColumn(row, validColumns, 0);
        if (startColumn == -1)
            return;

        if (SanityManager.DEBUG)
        {
            // make sure that if N bits are set in the validColumns that
            // exactly N columns are passed in via the row array.
            if (!isOverflowPage() && validColumns != null)
            {
                if (RowUtil.getNumberOfColumns(-1, validColumns) > row.length)
                    SanityManager.THROWASSERT("updating slot " + slot +
                         " on page " + getIdentity() + " " +
                          RowUtil.getNumberOfColumns(-1, validColumns) +
                          " bits are set in validColumns but only " +
                          row.length + " columns in row[]");
            }
        }


        // Keep track of row shrinkage in the head row piece. If any row
        // piece shrinks, file a post commit work to clear all reserved
        // space for the entire row chain.
        boolean rowHasReservedSpace = false;

        // Walk the row chain starting with this page; slot/id are advanced
        // to the next row piece at the bottom of the loop.
        StoredPage curPage = this;
        for (;;)
        {
            StoredRecordHeader rh = curPage.getHeaderAtSlot(slot);

            int startField = rh.getFirstField();
            int endFieldExclusive = startField + rh.getNumberFields();

            // curPage contains column[startField] to
            // column[endFieldExclusive-1]

            // Need to cope with an update that is increasing the number of
            // columns. If this occurs we want to make sure that we perform
            // a single update to the last portion of a record, and not an
            // update of the current columns and then an update to append a
            // column.

            long nextPage = -1;
            int realStartColumn = -1;
            int realSpaceOnPage = -1;

            // Only touch this row piece if it holds (or, for the last
            // piece, should hold) the next column to update.
            if (!rh.hasOverflow() ||
                ((startColumn >= startField) &&
                 (startColumn < endFieldExclusive)))
            {
                boolean hitLongColumn;
                int nextColumn = -1;
                Object[] savedFields = null;
                DynamicByteArrayOutputStream logBuffer = null;

                // Retry loop: each LongColumnException moves long-column
                // data out of line, then the update is re-attempted.
                do
                {
                    try
                    {
                        // Update this portion of the record.
                        // Pass in headRowHandle in case we are to update any
                        // long column and they need to be cleaned up by post
                        // commit processing. We don't want to purge the
                        // columns right now because in order to reclaim the
                        // page, we need to remove them. But it would be bad
                        // to remove them now because the transaction may not
                        // commit for a long time. We can do both purging of
                        // the long column and page removal together in the
                        // post commit.
                        nextColumn =
                            owner.getActionSet().actionUpdate(
                                t, curPage, slot, id, row, validColumns,
                                realStartColumn, logBuffer,
                                realSpaceOnPage, headRowHandle);

                        hitLongColumn = false;

                    }
                    catch (LongColumnException lce)
                    {

                        if (lce.getRealSpaceOnPage() == -1)
                        {
                            // An update that has caused the row to increase
                            // in size *and* push some fields off the page
                            // that need to be inserted in an overflow page.

                            // No need to make a copy as we are going to use
                            // this buffer right away.
                            logBuffer = lce.getLogBuffer();

                            savedFields =
                                (Object[]) lce.getColumn();

                            realStartColumn = lce.getNextColumn();
                            realSpaceOnPage = -1;

                            hitLongColumn = true;

                            continue;
                        }


                        // We caught a real long column exception.
                        // Three things should happen here:
                        // 1. insert the long column into overflow pages.
                        // 2. append the overflow field header in the main chain.
                        // 3. continue the update in the main data chain.
                        logBuffer =
                            new DynamicByteArrayOutputStream(lce.getLogBuffer());

                        // Step 1: insert the long column ... if this update
                        // operation rolls back, purge the after image column
                        // chain and reclaim the overflow page because the
                        // whole chain will be orphaned anyway.
                        RecordHandle longColumnHandle =
                            insertLongColumn(
                                curPage, lce, Page.INSERT_UNDO_WITH_PURGE);

                        // step 2: append overflow field header to log buffer
                        int overflowFieldLen = 0;
                        try
                        {
                            overflowFieldLen +=
                                appendOverflowFieldHeader(
                                    logBuffer, longColumnHandle);

                        }
                        catch (IOException ioe)
                        {
                            throw StandardException.newException(
                                SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
                        }

                        // Step 3: continue the insert in the main data chain;
                        // need to pass the log buffer, and start column to
                        // the next insert.
                        realStartColumn = lce.getNextColumn() + 1;
                        realSpaceOnPage = lce.getRealSpaceOnPage() - overflowFieldLen;
                        hitLongColumn = true;

                    }

                } while (hitLongColumn);


                // See if we completed all the columns that are on this page.
                int validColumnsSize =
                    (validColumns == null) ? 0 : validColumns.getLength();

                // nextColumn != -1 means fields were pushed off this piece
                // and must be inserted on a new overflow page.
                if (nextColumn != -1)
                {

                    if (SanityManager.DEBUG)
                    {
                        // Note nextColumn might be less than the first
                        // column we started updating. This is because the
                        // update might force the record header to grow and
                        // push fields before the one we are updating off the
                        // page and into this insert.

                        if ((nextColumn < startField) ||
                            (rh.hasOverflow() && (nextColumn >= endFieldExclusive)))
                        {
                            SanityManager.THROWASSERT(
                                "nextColumn out of range = " + nextColumn +
                                " expected between " +
                                startField + " and " + endFieldExclusive);
                        }
                    }

                    // Need to insert rows from nextColumn to
                    // endFieldExclusive onto a new overflow page.
                    // If the column is not being updated we pick it up from
                    // the current page. If it is being updated we take it
                    // from the new value.
                    int possibleLastFieldExclusive = endFieldExclusive;

                    if (!rh.hasOverflow())
                    {
                        // we might be adding a field here
                        if (validColumns == null)
                        {
                            if (row.length > possibleLastFieldExclusive)
                                possibleLastFieldExclusive = row.length;
                        }
                        else
                        {
                            if (validColumnsSize > possibleLastFieldExclusive)
                                possibleLastFieldExclusive = validColumnsSize;
                        }
                    }


                    // use a sparse row
                    Object[] newRow =
                        new Object[possibleLastFieldExclusive];

                    FormatableBitSet newColumnList =
                        new FormatableBitSet(possibleLastFieldExclusive);

                    // NOTE(review): fieldStream is never used below;
                    // retained from the original code.
                    ByteArrayOutputStream fieldStream = null;

                    for (int i = nextColumn; i < possibleLastFieldExclusive; i++)
                    {
                        if ((validColumns == null) ||
                            (validColumnsSize > i && validColumns.isSet(i)))
                        {
                            newColumnList.set(i);
                            // use the new value
                            newRow[i] = RowUtil.getColumn(row, validColumns, i);

                        }
                        else if (i < endFieldExclusive)
                        {
                            newColumnList.set(i);

                            // use the old value saved by the long column
                            // exception handling above
                            newRow[i] = savedFields[i - nextColumn];
                        }
                    }

                    RecordHandle handle = curPage.getRecordHandleAtSlot(slot);

                    // If the portion we just updated is the last portion
                    // then there cannot be any updates to do.
                    if (rh.hasOverflow())
                    {
                        // We have to carry across the overflow information
                        // from the current record, if any.
                        nextPage = rh.getOverflowPage();
                        id = rh.getOverflowId();

                        // find the next starting column before unlatching page
                        startColumn =
                            RowUtil.nextColumn(
                                row, validColumns, endFieldExclusive);
                    }
                    else
                    {
                        startColumn = -1;
                        nextPage = 0;
                    }


                    // After the update is done, see if this row piece has
                    // shrunk in curPage if no other row pieces have shrunk
                    // so far. In head page, need to respect
                    // minimumRecordSize. In overflow page, only need to
                    // respect RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT.
                    // Don't bother with temp container.
                    if (!rowHasReservedSpace && headRowHandle != null &&
                        curPage != null && !owner.isTemporaryContainer())
                    {
                        rowHasReservedSpace =
                            curPage.checkRowReservedSpace(slot);
                    }


                    // Insert the record portion on a new overflow page at
                    // slot 0; this will automatically handle any overflows
                    // in this new portion.

                    // BasePage op = getNewOverflowPage();

                    BasePage op =
                        curPage.getOverflowPageForInsert(
                            slot,
                            newRow,
                            newColumnList,
                            nextColumn);

                    // We have all the information from this page so unlatch it
                    if (curPage != this)
                    {
                        curPage.unlatch();
                        curPage = null;
                    }

                    byte mode = Page.INSERT_OVERFLOW;
                    if (nextPage != 0)
                        mode |= Page.INSERT_FOR_SPLIT;

                    RecordHandle nextPortionHandle =
                        nextPage == 0 ? null :
                        owner.makeRecordHandle(nextPage, id);

                    // RESOLVED (sku): even though we would like to roll back
                    // these inserts with PURGE rather than with delete,
                    // we have to delete because if we purge the last row
                    // from an overflow page, the purge will queue a post
                    // commit to remove the page.
                    // While this is OK with long columns, we cannot do this
                    // for long rows because long row overflow pages can be
                    // shared by more than one long rows, and thus it is unsafe
                    // to remove the page without first latching the head page.
                    // However, the insert log record do not have the head
                    // row's page number so the rollback cannot put that
                    // information into the post commit work.
                    RecordHandle portionHandle =
                        op.insertAllowOverflow(
                            0, newRow, newColumnList, nextColumn, mode, 100,
                            nextPortionHandle);

                    // Update the previous record header to point to new portion
                    if (curPage == this)
                        updateOverflowDetails(this, handle, portionHandle);
                    else
                        updateOverflowDetails(handle, portionHandle);
                    op.unlatch();
                }
                else
                {

                    // See earlier comments on checking row reserved space.
                    if (!rowHasReservedSpace &&
                        headRowHandle != null &&
                        curPage != null &&
                        !owner.isTemporaryContainer())
                    {
                        rowHasReservedSpace =
                            curPage.checkRowReservedSpace(slot);
                    }


                    // find the next starting column before we unlatch the page
                    startColumn =
                        rh.hasOverflow() ?
                            RowUtil.nextColumn(
                                row, validColumns, endFieldExclusive) : -1;
                }

                // have we completed this update?
                if (startColumn == -1) {

                    if ((curPage != this) && (curPage != null))
                        curPage.unlatch();
                    break; // break out of the for loop

                }
            }

            if (nextPage == -1)
            {
                if (SanityManager.DEBUG)
                {
                    SanityManager.ASSERT(
                        curPage != null,
                        "Current page is null be no overflow information has been obtained");
                }

                // Get the next page info while we still have the page
                // latched.
                nextPage = rh.getOverflowPage();
                id = rh.getOverflowId();
            }

            if ((curPage != this) && (curPage != null))
                curPage.unlatch();

            // get the next portion page and find the correct slot
            curPage = (StoredPage) owner.getPage(nextPage);

            if (SanityManager.DEBUG)
            {
                SanityManager.ASSERT(
                    curPage.isOverflowPage(),
                    "following row chain gets a non-overflow page");
            }

            slot = curPage.findRecordById(id, FIRST_SLOT_NUMBER);
        }

        // Back to the head page. Get rid of all reserved space in the
        // entire row post commit.
        if (rowHasReservedSpace)
        {
            RawTransaction rxact = (RawTransaction)owner.getTransaction();

            ReclaimSpace work =
                new ReclaimSpace(ReclaimSpace.ROW_RESERVE,
                                 headRowHandle,
                                 rxact.getDataFactory(), true);
            rxact.addPostCommitWork(work);
        }
    }
8860
8861    /**
8862        See if the row on this page has reserved space that can be shrunk once
8863        the update commits.
8864     */

8865    private boolean checkRowReservedSpace(int slot) throws StandardException
8866    {
8867        boolean rowHasReservedSpace = false;
8868        try {
8869            int shrinkage = getReservedCount(slot);
8870
8871            // Only reclaim reserved space if it is
8872
// "reasonably" sized, i.e., we can reclaim at
8873
// least MININUM_RECORD_SIZE_DEFAULT
8874
int reclaimThreshold = RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT;
8875            
8876            if (shrinkage > reclaimThreshold) {
8877                int totalSpace = getRecordPortionLength(slot) + shrinkage;
8878
8879                if (isOverflowPage()) {
8880                    if (totalSpace >
8881                        RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT+reclaimThreshold)
8882                        rowHasReservedSpace = true;
8883
8884                    // Otherwise, I can at most reclaim less than
8885
// MINIMUM_RECORD_SIZE_DEFAULT, forget about that.
8886
} else {
8887                    // this is a head page
8888
if (totalSpace > (minimumRecordSize +
8889                                      RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT))
8890                        rowHasReservedSpace = true;
8891
8892                    // Otherwise, I can at most reclaim less than
8893
// MINIMUM_RECORD_SIZE_DEFAULT, forget about that.
8894
}
8895            }
8896        } catch (IOException JavaDoc ioe) {
8897            throw StandardException.newException(
8898                SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
8899        }
8900
8901        return rowHasReservedSpace;
8902    }
8903
8904    /**
8905        @see BasePage#compactRecord
8906        @exception StandardException Standard Cloudscape error policy
8907     */

8908    protected void compactRecord(RawTransaction t, int slot, int id)
8909         throws StandardException
8910    {
8911        // If this is a head row piece, first take care of the entire overflow
8912
// row chain. Don't need to worry about long column because they are
8913
// not in place updatable.
8914
if (isOverflowPage() == false) {
8915            StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
8916
8917            while (recordHeader.hasOverflow()) {
8918                StoredPage nextPageInRowChain =
8919                    getOverflowPage(recordHeader.getOverflowPage());
8920
8921                if (SanityManager.DEBUG)
8922                    SanityManager.ASSERT(nextPageInRowChain != null);
8923
8924                try {
8925                    int nextId = recordHeader.getOverflowId();
8926                    int nextSlot = getOverflowSlot(nextPageInRowChain, recordHeader);
8927
8928                    nextPageInRowChain.compactRecord(t, nextSlot, nextId);
8929
8930                    // Follow the next long row pointer.
8931
recordHeader = nextPageInRowChain.getHeaderAtSlot(nextSlot);
8932                } finally {
8933                    nextPageInRowChain.unlatch();
8934                }
8935            }
8936        }
8937
8938        // Lastly, see if this row has anything sizable that can be freed.
8939
// Try to only reclaim space larger than MINIMUM_RECORD_SIZE_DEFAULT
8940
// because otherwise it is probably not worth the effort.
8941
int reclaimThreshold = RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT;
8942        try
8943        {
8944            int reserve = getReservedCount(slot);
8945            if (reserve > reclaimThreshold) {
8946                int recordLength = getRecordPortionLength(slot);
8947                int correctReservedSpace = reserve;
8948
8949                if (isOverflowPage()) {
8950                    if ((reserve + recordLength) >
8951                        (RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT+reclaimThreshold))
8952                    {
8953                        // calculate what the correct reserved space is
8954
if (recordLength >= RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT)
8955                            correctReservedSpace = 0;
8956                        else // make sure record takes up minimum_record_size
8957
correctReservedSpace =
8958                                RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT - recordLength;
8959                    }
8960                } else {
8961                    // this is a head page
8962
if ((reserve + recordLength) >
8963                        (minimumRecordSize+reclaimThreshold)) {
8964                        // calculate what the correct reserved space is
8965
if (recordLength >= minimumRecordSize)
8966                            correctReservedSpace = 0;
8967                        else
8968                            correctReservedSpace = minimumRecordSize - recordLength;
8969                    }
8970                }
8971
8972                if (SanityManager.DEBUG)
8973                {
8974                    SanityManager.ASSERT(correctReservedSpace <= reserve,
8975                                         "correct reserve > reserve");
8976                }
8977
8978                // A shrinkage has occured.
8979
if (correctReservedSpace < reserve) {
8980                    owner.getActionSet().
8981                        actionShrinkReservedSpace(t, this, slot, id,
8982                                        correctReservedSpace, reserve);
8983                }
8984            }
8985        } catch (IOException JavaDoc ioe) {
8986            throw StandardException.newException(
8987                SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
8988        }
8989    }
8990}
8991
8992
Popular Tags