KickJava   Java API By Example, From Geeks To Geeks.

Java > Open Source Codes > org > apache > derby > iapi > db > OnlineCompress


1 /*
2
3    Derby - Class org.apache.derby.iapi.db.OnlineCompress
4
5    Licensed to the Apache Software Foundation (ASF) under one or more
6    contributor license agreements. See the NOTICE file distributed with
7    this work for additional information regarding copyright ownership.
8    The ASF licenses this file to you under the Apache License, Version 2.0
9    (the "License"); you may not use this file except in compliance with
10    the License. You may obtain a copy of the License at
11
12       http://www.apache.org/licenses/LICENSE-2.0
13
14    Unless required by applicable law or agreed to in writing, software
15    distributed under the License is distributed on an "AS IS" BASIS,
16    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17    See the License for the specific language governing permissions and
18    limitations under the License.
19
20  */

21
22 package org.apache.derby.iapi.db;
23
24 import org.apache.derby.iapi.error.StandardException;
25 import org.apache.derby.iapi.error.PublicAPI;
26
27 import org.apache.derby.iapi.sql.dictionary.DataDictionaryContext;
28 import org.apache.derby.iapi.sql.dictionary.DataDictionary;
29 import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;
30 import org.apache.derby.iapi.sql.dictionary.TableDescriptor;
31 import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;
32 import org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList;
33 import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor;
34 import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptorList;
35 import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
36
37 import org.apache.derby.iapi.sql.depend.DependencyManager;
38
39 import org.apache.derby.iapi.sql.execute.ExecRow;
40 import org.apache.derby.iapi.sql.execute.ExecutionContext;
41
42 import org.apache.derby.iapi.types.DataValueDescriptor;
43 import org.apache.derby.iapi.types.DataValueFactory;
44
45
46 import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
47 import org.apache.derby.iapi.sql.conn.ConnectionUtil;
48
49 import org.apache.derby.iapi.store.access.TransactionController;
50 import org.apache.derby.iapi.types.RowLocation;
51 import org.apache.derby.iapi.store.access.ScanController;
52 import org.apache.derby.iapi.store.access.ConglomerateController;
53 import org.apache.derby.iapi.store.access.GroupFetchScanController;
54 import org.apache.derby.iapi.store.access.RowUtil;
55 import org.apache.derby.iapi.store.access.Qualifier;
56
57 import org.apache.derby.iapi.services.sanity.SanityManager;
58
59 import org.apache.derby.iapi.reference.SQLState;
60
61 import org.apache.derby.iapi.services.io.FormatableBitSet;
62
63 import java.sql.SQLException JavaDoc;
64
65 /**
66
67 Implementation of SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE().
68 <p>
69 Code which implements the following system procedure:
70
71 void SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(
72     IN SCHEMANAME VARCHAR(128),
73     IN TABLENAME VARCHAR(128),
74     IN PURGE_ROWS SMALLINT,
75     IN DEFRAGMENT_ROWS SMALLINT,
76     IN TRUNCATE_END SMALLINT)
77 <p>
78 Use the SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE system procedure to reclaim
79 unused, allocated space in a table and its indexes. Typically, unused allocated
80 space exists when a large amount of data is deleted from a table, and there
81 have not been subsequent inserts to use the space freed by the deletes.
82 By default, Derby does not return unused space to the operating system. For
83 example, once a page has been allocated to a table or index, it is not
84 automatically returned to the operating system until the table or index is
85 destroyed. SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE allows you to return unused
86 space to the operating system.
87 <p>
88 This system procedure can be used to force 3 levels of in place compression
89 of a SQL table: PURGE_ROWS, DEFRAGMENT_ROWS, TRUNCATE_END. Unlike
90 SYSCS_UTIL.SYSCS_COMPRESS_TABLE() all work is done in place in the existing
91 table/index.
92 <p>
93 Syntax:
94 SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(
95     IN SCHEMANAME VARCHAR(128),
96     IN TABLENAME VARCHAR(128),
97     IN PURGE_ROWS SMALLINT,
98     IN DEFRAGMENT_ROWS SMALLINT,
99     IN TRUNCATE_END SMALLINT)
100 <p>
101 SCHEMANAME:
102 An input argument of type VARCHAR(128) that specifies the schema of the table. Passing a null will result in an error.
103 <p>
104 TABLENAME:
105 An input argument of type VARCHAR(128) that specifies the table name of the
106 table. The string must exactly match the case of the table name, and the
107 argument of "Fred" will be passed to SQL as the delimited identifier 'Fred'.
108 Passing a null will result in an error.
109 <p>
110 PURGE_ROWS:
111 If PURGE_ROWS is set to non-zero then a single pass is made through the table
112 which will purge committed deleted rows from the table. This space is then
113 available for future inserted rows, but remains allocated to the table.
114 As this option scans every page of the table, its performance is linearly
115 related to the size of the table.
116 <p>
117 DEFRAGMENT_ROWS:
118 If DEFRAGMENT_ROWS is set to non-zero then a single defragment pass is made
119 which will move existing rows from the end of the table towards the front
120 of the table. The goal of the defragment run is to empty a set of pages
121 at the end of the table which can then be returned to the OS by the
122 TRUNCATE_END option. It is recommended to only run DEFRAGMENT_ROWS, if also
123 specifying the TRUNCATE_END option. This option scans the whole table and
124 needs to update index entries for every base table row move, and thus execution
125 time is linearly related to the size of the table.
126 <p>
127 TRUNCATE_END:
128 If TRUNCATE_END is set to non-zero then all contiguous pages at the end of
129 the table will be returned to the OS. Running the PURGE_ROWS and/or
130 DEFRAGMENT_ROWS passes options may increase the number of pages affected.
131 This option itself does no scans of the table, so performs on the order of a
132 few system calls.
133 <p>
134 SQL example:
135 To compress a table called CUSTOMER in a schema called US, using all
136 available compress options:
137 call SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE('US', 'CUSTOMER', 1, 1, 1);
138
139 To quickly just return the empty free space at the end of the same table,
140 this option will run much quicker than running all phases but will likely
141 return much less space:
142 call SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE('US', 'CUSTOMER', 0, 0, 1);
143
144 Java example:
145 To compress a table called CUSTOMER in a schema called US, using all
146 available compress options:
147
148 CallableStatement cs = conn.prepareCall
149 ("CALL SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(?, ?, ?, ?, ?)");
150 cs.setString(1, "US");
151 cs.setString(2, "CUSTOMER");
152 cs.setShort(3, (short) 1);
153 cs.setShort(4, (short) 1);
154 cs.setShort(5, (short) 1);
155 cs.execute();
156
157 To quickly just return the empty free space at the end of the same table,
158 this option will run much quicker than running all phases but will likely
159 return much less space:
160
161 CallableStatement cs = conn.prepareCall
162 ("CALL SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(?, ?, ?, ?, ?)");
163 cs.setString(1, "US");
164 cs.setString(2, "CUSTOMER");
165 cs.setShort(3, (short) 0);
166 cs.setShort(4, (short) 0);
167 cs.setShort(5, (short) 1);
168 cs.execute();
169
170 <p>
171 It is recommended that the SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE procedure is
172 issued in auto-commit mode.
173 Note: This procedure acquires an exclusive table lock on the table being compressed. All statement plans dependent on the table or its indexes are invalidated. For information on identifying unused space, see the Derby Server and Administration Guide.
174
175 TODO LIST:
176 o defragment requires table level lock in nested user transaction, which
177   will conflict with user lock on same table in user transaction.
178
179 **/

180 public class OnlineCompress
181 {
182
183     /** no requirement for a constructor */
184     private OnlineCompress() {
185     }
186
187     /**
188      * Implementation of SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE().
189      * <p>
190      * Top level implementation of the system procedure. All the
191      * real work is found in the other routines in this file implementing
192      * the 3 phases of inplace compression: purge, defragment, and truncate.
193      * <p>
194      * @param schemaName schema name of table, required
195      * @param tableName table name to be compressed
196      * @param purgeRows if true, do a purge pass on the table
197      * @param defragmentRows if true, do a defragment pass on the table
198      * @param truncateEnd if true, return empty pages at end to OS.
199      *
200      * @exception SQLException Errors returned by throwing SQLException.
201      **/

202     public static void compressTable(
203     String JavaDoc schemaName,
204     String JavaDoc tableName,
205     boolean purgeRows,
206     boolean defragmentRows,
207     boolean truncateEnd)
208         throws SQLException JavaDoc
209     {
210         LanguageConnectionContext lcc = ConnectionUtil.getCurrentLCC();
211         TransactionController tc = lcc.getTransactionExecute();
212
213         try
214         {
215             DataDictionary data_dictionary = lcc.getDataDictionary();
216
217             // Each of the following may give up locks allowing ddl on the
218
// table, so each phase needs to do the data dictionary lookup.
219
// The order is important as it makes sense to first purge
220
// deleted rows, then defragment existing non-deleted rows, and
221
// finally to truncate the end of the file which may have been
222
// made larger by the previous purge/defragment pass.
223

224             if (purgeRows)
225                 purgeRows(schemaName, tableName, data_dictionary, tc);
226
227             if (defragmentRows)
228                 defragmentRows(schemaName, tableName, data_dictionary, tc);
229
230             if (truncateEnd)
231                 truncateEnd(schemaName, tableName, data_dictionary, tc);
232         }
233         catch (StandardException se)
234         {
235             throw PublicAPI.wrapStandardException(se);
236         }
237
238     }
239
240     /**
241      * Defragment rows in the given table.
242      * <p>
243      * Scans the rows at the end of a table and moves them to free spots
244      * towards the beginning of the table. In the same transaction all
245      * associated indexes are updated to reflect the new location of the
246      * base table row.
247      * <p>
248      * After a defragment pass, if was possible, there will be a set of
249      * empty pages at the end of the table which can be returned to the
250      * operating system by calling truncateEnd(). The allocation bit
251      * maps will be set so that new inserts will tend to go to empty and
252      * half filled pages starting from the front of the conglomerate.
253      *
254      * @param schemaName schema of table to defragement
255      * @param tableName name of table to defragment
256      * @param data_dictionary An open data dictionary to look up the table in.
257      * @param tc transaction controller to use to do updates.
258      *
259      **/

260     private static void defragmentRows(
261     String JavaDoc schemaName,
262     String JavaDoc tableName,
263     DataDictionary data_dictionary,
264     TransactionController tc)
265         throws SQLException JavaDoc
266     {
267         GroupFetchScanController base_group_fetch_cc = null;
268         int num_indexes = 0;
269
270         int[][] index_col_map = null;
271         ScanController[] index_scan = null;
272         ConglomerateController[] index_cc = null;
273         DataValueDescriptor[][] index_row = null;
274
275         LanguageConnectionContext lcc = ConnectionUtil.getCurrentLCC();
276         TransactionController nested_tc = null;
277
278         try {
279
280             SchemaDescriptor sd =
281                 data_dictionary.getSchemaDescriptor(
282                     schemaName, nested_tc, true);
283             TableDescriptor td =
284                 data_dictionary.getTableDescriptor(tableName, sd);
285             nested_tc =
286                 tc.startNestedUserTransaction(false);
287
288             if (td == null)
289             {
290                 throw StandardException.newException(
291                     SQLState.LANG_TABLE_NOT_FOUND,
292                     schemaName + "." + tableName);
293             }
294
295             switch (td.getTableType())
296             {
297             /* Skip views and vti tables */
298             case TableDescriptor.VIEW_TYPE:
299             case TableDescriptor.VTI_TYPE:
300                 return;
301             // other types give various errors here
302
// DERBY-719,DERBY-720
303
default:
304                 break;
305             }
306
307
308             ConglomerateDescriptor heapCD =
309                 td.getConglomerateDescriptor(td.getHeapConglomerateId());
310
311             /* Get a row template for the base table */
312             ExecRow baseRow =
313                 lcc.getExecutionContext().getExecutionFactory().getValueRow(
314                     td.getNumberOfColumns());
315
316
317             /* Fill the row with nulls of the correct type */
318             ColumnDescriptorList cdl = td.getColumnDescriptorList();
319             int cdlSize = cdl.size();
320
321             for (int index = 0; index < cdlSize; index++)
322             {
323                 ColumnDescriptor cd = (ColumnDescriptor) cdl.elementAt(index);
324                 baseRow.setColumn(cd.getPosition(), cd.getType().getNull());
325             }
326
327             DataValueDescriptor[][] row_array = new DataValueDescriptor[100][];
328             row_array[0] = baseRow.getRowArray();
329             RowLocation[] old_row_location_array = new RowLocation[100];
330             RowLocation[] new_row_location_array = new RowLocation[100];
331
332             // Create the following 3 arrays which will be used to update
333
// each index as the scan moves rows about the heap as part of
334
// the compress:
335
// index_col_map - map location of index cols in the base row,
336
// ie. index_col_map[0] is column offset of 1st
337
// key column in base row. All offsets are 0
338
// based.
339
// index_scan - open ScanController used to delete old index row
340
// index_cc - open ConglomerateController used to insert new
341
// row
342

343             ConglomerateDescriptor[] conglom_descriptors =
344                 td.getConglomerateDescriptors();
345
346             // conglom_descriptors has an entry for the conglomerate and each
347
// one of it's indexes.
348
num_indexes = conglom_descriptors.length - 1;
349
350             // if indexes exist, set up data structures to update them
351
if (num_indexes > 0)
352             {
353                 // allocate arrays
354
index_col_map = new int[num_indexes][];
355                 index_scan = new ScanController[num_indexes];
356                 index_cc = new ConglomerateController[num_indexes];
357                 index_row = new DataValueDescriptor[num_indexes][];
358
359                 setup_indexes(
360                     nested_tc,
361                     td,
362                     index_col_map,
363                     index_scan,
364                     index_cc,
365                     index_row);
366
367             }
368
369             /* Open the heap for reading */
370             base_group_fetch_cc =
371                 nested_tc.defragmentConglomerate(
372                     td.getHeapConglomerateId(),
373                     false,
374                     true,
375                     TransactionController.OPENMODE_FORUPDATE,
376                     TransactionController.MODE_TABLE,
377                     TransactionController.ISOLATION_SERIALIZABLE);
378
379             int num_rows_fetched = 0;
380             while ((num_rows_fetched =
381                         base_group_fetch_cc.fetchNextGroup(
382                             row_array,
383                             old_row_location_array,
384                             new_row_location_array)) != 0)
385             {
386                 if (num_indexes > 0)
387                 {
388                     for (int row = 0; row < num_rows_fetched; row++)
389                     {
390                         for (int index = 0; index < num_indexes; index++)
391                         {
392                             fixIndex(
393                                 row_array[row],
394                                 index_row[index],
395                                 old_row_location_array[row],
396                                 new_row_location_array[row],
397                                 index_cc[index],
398                                 index_scan[index],
399                                 index_col_map[index]);
400                         }
401                     }
402                 }
403             }
404
405             // TODO - It would be better if commits happened more frequently
406
// in the nested transaction, but to do that there has to be more
407
// logic to catch a ddl that might jump in the middle of the
408
// above loop and invalidate the various table control structures
409
// which are needed to properly update the indexes. For example
410
// the above loop would corrupt an index added midway through
411
// the loop if not properly handled. See DERBY-1188.
412
nested_tc.commit();
413             
414         }
415         catch (StandardException se)
416         {
417             throw PublicAPI.wrapStandardException(se);
418         }
419         finally
420         {
421             try
422             {
423                 /* Clean up before we leave */
424                 if (base_group_fetch_cc != null)
425                 {
426                     base_group_fetch_cc.close();
427                     base_group_fetch_cc = null;
428                 }
429
430                 if (num_indexes > 0)
431                 {
432                     for (int i = 0; i < num_indexes; i++)
433                     {
434                         if (index_scan != null && index_scan[i] != null)
435                         {
436                             index_scan[i].close();
437                             index_scan[i] = null;
438                         }
439                         if (index_cc != null && index_cc[i] != null)
440                         {
441                             index_cc[i].close();
442                             index_cc[i] = null;
443                         }
444                     }
445                 }
446
447                 if (nested_tc != null)
448                 {
449                     nested_tc.destroy();
450                 }
451
452             }
453             catch (StandardException se)
454             {
455                 throw PublicAPI.wrapStandardException(se);
456             }
457         }
458
459         return;
460     }
461
462     /**
463      * Purge committed deleted rows from conglomerate.
464      * <p>
465      * Scans the table and purges any committed deleted rows from the
466      * table. If all rows on a page are purged then page is also
467      * reclaimed.
468      * <p>
469      *
470      * @param schemaName schema of table to defragement
471      * @param tableName name of table to defragment
472      * @param data_dictionary An open data dictionary to look up the table in.
473      * @param tc transaction controller to use to do updates.
474      *
475      **/

476     private static void purgeRows(
477     String JavaDoc schemaName,
478     String JavaDoc tableName,
479     DataDictionary data_dictionary,
480     TransactionController tc)
481         throws StandardException
482     {
483         SchemaDescriptor sd =
484             data_dictionary.getSchemaDescriptor(schemaName, tc, true);
485         TableDescriptor td =
486             data_dictionary.getTableDescriptor(tableName, sd);
487
488         if (td == null)
489         {
490             throw StandardException.newException(
491                 SQLState.LANG_TABLE_NOT_FOUND,
492                 schemaName + "." + tableName);
493         }
494
495         switch (td.getTableType())
496         {
497         /* Skip views and vti tables */
498         case TableDescriptor.VIEW_TYPE:
499         case TableDescriptor.VTI_TYPE:
500             break;
501         // other types give various errors here
502
// DERBY-719,DERBY-720
503
default:
504           {
505
506             ConglomerateDescriptor[] conglom_descriptors =
507                 td.getConglomerateDescriptors();
508
509             for (int cd_idx = 0; cd_idx < conglom_descriptors.length; cd_idx++)
510             {
511                 ConglomerateDescriptor cd = conglom_descriptors[cd_idx];
512
513                 tc.purgeConglomerate(cd.getConglomerateNumber());
514             }
515           }
516         }
517
518         return;
519     }
520
521     /**
522      * Truncate end of conglomerate.
523      * <p>
524      * Returns the contiguous free space at the end of the table back to
525      * the operating system. Takes care of space allocation bit maps, and
526      * OS call to return the actual space.
527      * <p>
528      *
529      * @param schemaName schema of table to defragement
530      * @param tableName name of table to defragment
531      * @param data_dictionary An open data dictionary to look up the table in.
532      * @param tc transaction controller to use to do updates.
533      *
534      **/

535     private static void truncateEnd(
536     String JavaDoc schemaName,
537     String JavaDoc tableName,
538     DataDictionary data_dictionary,
539     TransactionController tc)
540         throws StandardException
541     {
542         SchemaDescriptor sd =
543             data_dictionary.getSchemaDescriptor(schemaName, tc, true);
544         TableDescriptor td =
545             data_dictionary.getTableDescriptor(tableName, sd);
546
547         if (td == null)
548         {
549             throw StandardException.newException(
550                 SQLState.LANG_TABLE_NOT_FOUND,
551                 schemaName + "." + tableName);
552         }
553
554         switch (td.getTableType())
555         {
556         /* Skip views and vti tables */
557         case TableDescriptor.VIEW_TYPE:
558         case TableDescriptor.VTI_TYPE:
559             break;
560         // other types give various errors here
561
// DERBY-719,DERBY-720
562
default:
563           {
564           ConglomerateDescriptor[] conglom_descriptors =
565                 td.getConglomerateDescriptors();
566
567             for (int cd_idx = 0; cd_idx < conglom_descriptors.length; cd_idx++)
568             {
569                 ConglomerateDescriptor cd = conglom_descriptors[cd_idx];
570
571                 tc.compressConglomerate(cd.getConglomerateNumber());
572             }
573           }
574         }
575
576         return;
577     }
578
579     private static void setup_indexes(
580     TransactionController tc,
581     TableDescriptor td,
582     int[][] index_col_map,
583     ScanController[] index_scan,
584     ConglomerateController[] index_cc,
585     DataValueDescriptor[][] index_row)
586         throws StandardException
587     {
588
589         // Initialize the following 3 arrays which will be used to update
590
// each index as the scan moves rows about the heap as part of
591
// the compress:
592
// index_col_map - map location of index cols in the base row, ie.
593
// index_col_map[0] is column offset of 1st key
594
// column in base row. All offsets are 0 based.
595
// index_scan - open ScanController used to delete old index row
596
// index_cc - open ConglomerateController used to insert new row
597

598         ConglomerateDescriptor[] conglom_descriptors =
599                 td.getConglomerateDescriptors();
600
601
602         int index_idx = 0;
603         for (int cd_idx = 0; cd_idx < conglom_descriptors.length; cd_idx++)
604         {
605             ConglomerateDescriptor index_cd = conglom_descriptors[cd_idx];
606
607             if (!index_cd.isIndex())
608             {
609                 // skip the heap descriptor entry
610
continue;
611             }
612
613             // ScanControllers are used to delete old index row
614
index_scan[index_idx] =
615                 tc.openScan(
616                     index_cd.getConglomerateNumber(),
617                     true, // hold
618
TransactionController.OPENMODE_FORUPDATE,
619                     TransactionController.MODE_TABLE,
620                     TransactionController.ISOLATION_SERIALIZABLE,
621                     null, // full row is retrieved,
622
// so that full row can be used for start/stop keys
623
null, // startKeyValue - will be reset with reopenScan()
624
0, //
625
null, // qualifier
626
null, // stopKeyValue - will be reset with reopenScan()
627
0); //
628

629             // ConglomerateControllers are used to insert new index row
630
index_cc[index_idx] =
631                 tc.openConglomerate(
632                     index_cd.getConglomerateNumber(),
633                     true, // hold
634
TransactionController.OPENMODE_FORUPDATE,
635                     TransactionController.MODE_TABLE,
636                     TransactionController.ISOLATION_SERIALIZABLE);
637
638             // build column map to allow index row to be built from base row
639
int[] baseColumnPositions =
640                 index_cd.getIndexDescriptor().baseColumnPositions();
641             int[] zero_based_map =
642                 new int[baseColumnPositions.length];
643
644             for (int i = 0; i < baseColumnPositions.length; i++)
645             {
646                 zero_based_map[i] = baseColumnPositions[i] - 1;
647             }
648
649             index_col_map[index_idx] = zero_based_map;
650
651             // build row array to delete from index and insert into index
652
// length is length of column map + 1 for RowLocation.
653
index_row[index_idx] =
654                 new DataValueDescriptor[baseColumnPositions.length + 1];
655
656             index_idx++;
657         }
658
659         return;
660     }
661
662
663     /**
664      * Delete old index row and insert new index row in input index.
665      * <p>
666      *
667      * @param base_row all columns of base row
668      * @param index_row an index row template, filled in by this routine
669      * @param old_row_loc old location of base row, used to delete index
670      * @param new_row_loc new location of base row, used to update index
671      * @param index_cc index conglomerate to insert new row
672      * @param index_scan index scan to delete old entry
673      * @param index_col_map description of mapping of index row to base row,
674      *
675      *
676      * @exception StandardException Standard exception policy.
677      **/

678     private static void fixIndex(
679     DataValueDescriptor[] base_row,
680     DataValueDescriptor[] index_row,
681     RowLocation old_row_loc,
682     RowLocation new_row_loc,
683     ConglomerateController index_cc,
684     ScanController index_scan,
685     int[] index_col_map)
686         throws StandardException
687     {
688         if (SanityManager.DEBUG)
689         {
690             // baseColumnPositions should describe all columns in index row
691
// except for the final column, which is the RowLocation.
692
SanityManager.ASSERT(index_col_map != null);
693             SanityManager.ASSERT(index_row != null);
694             SanityManager.ASSERT(
695                 (index_col_map.length == (index_row.length - 1)));
696         }
697
698         // create the index row to delete from from the base row, using map
699
for (int index = 0; index < index_col_map.length; index++)
700         {
701             index_row[index] = base_row[index_col_map[index]];
702         }
703         // last column in index in the RowLocation
704
index_row[index_row.length - 1] = old_row_loc;
705
706         // position the scan for the delete, the scan should already be open.
707
// This is done by setting start scan to full key, GE and stop scan
708
// to full key, GT.
709
index_scan.reopenScan(
710             index_row,
711             ScanController.GE,
712             (Qualifier[][]) null,
713             index_row,
714             ScanController.GT);
715
716         // position the scan, serious problem if scan does not find the row.
717
if (index_scan.next())
718         {
719             index_scan.delete();
720         }
721         else
722         {
723             // Didn't find the row we wanted to delete.
724
if (SanityManager.DEBUG)
725             {
726                 SanityManager.THROWASSERT(
727                     "Did not find row to delete." +
728                     "base_row = " + RowUtil.toString(base_row) +
729                     "index_row = " + RowUtil.toString(index_row));
730             }
731         }
732
733         // insert the new index row into the conglomerate
734
index_row[index_row.length - 1] = new_row_loc;
735
736         index_cc.insert(index_row);
737
738         return;
739     }
740 }
741
Popular Tags