1 /*
2
3    Derby - Class org.apache.derby.impl.sql.execute.InsertResultSet
4
5    Licensed to the Apache Software Foundation (ASF) under one or more
6    contributor license agreements. See the NOTICE file distributed with
7    this work for additional information regarding copyright ownership.
8    The ASF licenses this file to you under the Apache License, Version 2.0
9    (the "License"); you may not use this file except in compliance with
10    the License. You may obtain a copy of the License at
11
12       http://www.apache.org/licenses/LICENSE-2.0
13
14    Unless required by applicable law or agreed to in writing, software
15    distributed under the License is distributed on an "AS IS" BASIS,
16    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17    See the License for the specific language governing permissions and
18    limitations under the License.
19
20  */

21
22 package org.apache.derby.impl.sql.execute;
23
24 import org.apache.derby.iapi.services.loader.GeneratedMethod;
25
26 import org.apache.derby.iapi.services.context.ContextManager;
27
28 import org.apache.derby.iapi.services.monitor.Monitor;
29
30 import org.apache.derby.iapi.services.sanity.SanityManager;
31
32 import org.apache.derby.iapi.services.stream.HeaderPrintWriter;
33 import org.apache.derby.iapi.services.stream.InfoStreams;
34 import org.apache.derby.iapi.services.io.StreamStorable;
35 import org.apache.derby.iapi.services.loader.GeneratedMethod;
36
37 import org.apache.derby.iapi.error.StandardException;
38
39 import org.apache.derby.iapi.sql.StatementUtil;
40 import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
41
42 import org.apache.derby.iapi.types.DataValueDescriptor;
43 import org.apache.derby.iapi.types.TypeId;
44 import org.apache.derby.iapi.types.RowLocation;
45
46 import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
47 import org.apache.derby.iapi.sql.dictionary.DataDictionary;
48 import org.apache.derby.iapi.sql.dictionary.DataDictionaryContext;
49 import org.apache.derby.iapi.sql.dictionary.IndexRowGenerator;
50 import org.apache.derby.iapi.sql.dictionary.TableDescriptor;
51 import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;
52 import org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator;
53 import org.apache.derby.iapi.sql.dictionary.StatisticsDescriptor;
54 import org.apache.derby.iapi.sql.dictionary.TriggerDescriptor;
55 import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor;
56 import org.apache.derby.iapi.sql.depend.DependencyManager;
57
58 import org.apache.derby.iapi.sql.ResultColumnDescriptor;
59
60 import org.apache.derby.iapi.reference.SQLState;
61
62 import org.apache.derby.iapi.sql.execute.ConstantAction;
63 import org.apache.derby.iapi.sql.execute.CursorResultSet;
64 import org.apache.derby.iapi.sql.execute.ExecIndexRow;
65 import org.apache.derby.iapi.sql.execute.ExecRow;
66 import org.apache.derby.iapi.sql.execute.RowChanger;
67 import org.apache.derby.iapi.sql.execute.NoPutResultSet;
68 import org.apache.derby.iapi.sql.execute.TargetResultSet;
69
70 import org.apache.derby.iapi.types.NumberDataValue;
71
72 import org.apache.derby.iapi.sql.Activation;
73 import org.apache.derby.iapi.sql.LanguageProperties;
74 import org.apache.derby.iapi.sql.ResultDescription;
75 import org.apache.derby.iapi.sql.ResultSet;
76
77 import org.apache.derby.iapi.store.access.ColumnOrdering;
78 import org.apache.derby.iapi.store.access.ConglomerateController;
79 import org.apache.derby.iapi.store.access.DynamicCompiledOpenConglomInfo;
80 import org.apache.derby.iapi.store.access.GroupFetchScanController;
81 import org.apache.derby.iapi.store.access.Qualifier;
82 import org.apache.derby.iapi.store.access.RowLocationRetRowSource;
83 import org.apache.derby.iapi.store.access.ScanController;
84 import org.apache.derby.iapi.store.access.SortObserver;
85 import org.apache.derby.iapi.store.access.SortController;
86 import org.apache.derby.iapi.store.access.StaticCompiledOpenConglomInfo;
87 import org.apache.derby.iapi.store.access.TransactionController;
88
89 import org.apache.derby.impl.sql.execute.AutoincrementCounter;
90 import org.apache.derby.impl.sql.execute.InternalTriggerExecutionContext;
91
92 import org.apache.derby.catalog.UUID;
93 import org.apache.derby.catalog.types.StatisticsImpl;
94 import org.apache.derby.iapi.db.TriggerExecutionContext;
95 import org.apache.derby.iapi.services.io.FormatableBitSet;
96 import org.apache.derby.iapi.util.StringUtil;
97
98 import java.util.Enumeration;
99 import java.util.Hashtable;
100 import java.util.Properties;
101 import java.util.Vector;
102
103 /**
104  * Insert the rows from the source into the specified
105  * base table. This will cause constraints to be checked
106  * and triggers to be executed based on the constraints and triggers
107  * compiled into the insert plan.
108  */

109 class InsertResultSet extends DMLWriteResultSet implements TargetResultSet
110 {
111     // RESOLVE. Embarrassingly large public state. If we could move the Replication
112     // code into the same package, then these variables could be protected.
113

114     // passed in at construction time
115

116     private NoPutResultSet sourceResultSet;
117     NoPutResultSet savedSource;
118     InsertConstantAction constants;
119     private GeneratedMethod checkGM;
120     private long heapConglom;
121
122     // The following is for the JDBC 3.0 auto-generated keys result set feature
123     private ResultSet autoGeneratedKeysResultSet;
124     private TemporaryRowHolderImpl autoGeneratedKeysRowsHolder;
125
126     // divined at run time
127

128     private ResultDescription resultDescription;
129     private RowChanger rowChanger;
130
131     private TransactionController tc;
132     private ExecRow row;
133     
134     boolean userSpecifiedBulkInsert;
135     boolean bulkInsertPerformed;
136
137     // bulkInsert
138     protected boolean bulkInsert;
139     private boolean bulkInsertReplace;
140     private boolean firstRow = true;
141     private boolean[] needToDropSort;
142
143     /*
144     ** This hashtable is used to convert an index conglomerate
145     ** from its old conglom number to the new one. It is
146     ** bulk insert specific.
147     */

148     private Hashtable indexConversionTable;
149
150     // indexedCols is 1-based
151     private FormatableBitSet indexedCols;
152     private ConglomerateController bulkHeapCC;
153
154     protected DataDictionary dd;
155     protected TableDescriptor td;
156         
157     private ExecIndexRow[] indexRows;
158     private ExecRow fullTemplate;
159     private long[] sortIds;
160     private RowLocationRetRowSource[]
161                                     rowSources;
162     private ScanController bulkHeapSC;
163     private ColumnOrdering[][] ordering;
164     private SortController[] sorters;
165     private TemporaryRowHolderImpl rowHolder;
166     private RowLocation rl;
167
168     private boolean hasBeforeStatementTrigger;
169     private boolean hasBeforeRowTrigger;
170     private BulkTableScanResultSet tableScan;
171
172     private int numOpens;
173     private boolean firstExecute;
174
175     // cached across open()s
176     private FKInfo[] fkInfoArray;
177     private TriggerInfo triggerInfo;
178     private RISetChecker fkChecker;
179     private TriggerEventActivator triggerActivator;
180     /**
181      * keeps track of autoincrement values that are generated by
182      * getSetAutoincrementValue.
183      */

184     private NumberDataValue aiCache[];
185
186     /**
187      * If set to true, implies that this (rep)insertresultset has generated
188      * autoincrement values. During refresh for example, the autoincrement
189      * values are not generated but sent from the source to target or
190      * vice-versa.
191      */

192     protected boolean autoincrementGenerated;
193     private long identityVal; //support of IDENTITY_VAL_LOCAL function
194     private boolean setIdentity;
195     
196
197     /**
198      * Returns the description of the inserted rows.
199      * REVISIT: Do we want this to return NULL instead?
200      */

201     public ResultDescription getResultDescription()
202     {
203         return resultDescription;
204     }
205
206     // TargetResultSet interface
207

208     /**
209      * @see TargetResultSet#changedRow
210      *
211      * @exception StandardException thrown if cursor finished.
212      */

213     public void changedRow(ExecRow execRow, RowLocation rowLocation)
214         throws StandardException
215     {
216         if (SanityManager.DEBUG)
217         {
218             SanityManager.ASSERT(bulkInsert,
219                 "bulkInsert expected to be true");
220         }
221
222         /* Set up sorters, etc. if 1st row and there are indexes */
223         if (constants.irgs.length > 0)
224         {
225             RowLocation rlClone = (RowLocation) rowLocation.cloneObject();
226
227             // Objectify any of the streaming columns that are indexed.
228             for (int i = 0; i < execRow.getRowArray().length; i++)
229             {
230                 if (! constants.indexedCols[i])
231                 {
232                     continue;
233                 }
234
235                 if (execRow.getRowArray()[i] instanceof StreamStorable)
236                     ((DataValueDescriptor)execRow.getRowArray()[i]).getObject();
237             }
238
239             // Every index row will share the same row location, etc.
240             if (firstRow)
241             {
242                 firstRow = false;
243                 indexRows = new ExecIndexRow[constants.irgs.length];
244                 setUpAllSorts(execRow.getNewNullRow(), rlClone);
245             }
246
247             // Put the row into the indexes
248             for (int index = 0; index < constants.irgs.length; index++)
249             {
250                 // Get a new object Array for the index
251                 indexRows[index].getNewObjectArray();
252                 // Associate the index row with the source row
253                 constants.irgs[index].getIndexRow(execRow,
254                                                rlClone,
255                                                indexRows[index],
256                                                (FormatableBitSet) null);
257
258                 // Insert the index row into the matching sorter
259                 sorters[index].insert(indexRows[index].getRowArray());
260             }
261         }
262     }
263
264     /**
265      * Preprocess the source row. Apply any check constraints here.
266      * Do an in-place cloning of all key columns. For triggers, if
267      * we have a before row trigger, we fire it here if we can.
268      * This is useful for bulk insert where the store stands between
269      * the source and us.
270      *
271      * @param execRow The source row.
272      *
273      * @return The preprocessed source row.
274      * @exception StandardException thrown on error
275      */

276     public ExecRow preprocessSourceRow(ExecRow execRow)
277         throws StandardException
278     {
279         //System.out.println("preprocessrow is called ");
280         /*
281         ** We can process before row triggers now. All other
282         ** triggers can only be fired after we have inserted
283         ** all our rows.
284         */

285         if (hasBeforeRowTrigger)
286         {
287             // RESOLVE
288             // Possibly dead code -- if there are triggers we don't do bulk insert.
289             rowHolder.truncate();
290             rowHolder.insert(execRow);
291             triggerActivator.notifyEvent(TriggerEvents.BEFORE_INSERT,
292                                             (CursorResultSet)null,
293                                             rowHolder.getResultSet());
294         }
295
296         if (checkGM != null && !hasBeforeStatementTrigger)
297         {
298             evaluateCheckConstraints();
299         }
300         // RESOLVE - optimize the cloning
301         if (constants.irgs.length > 0)
302         {
303             /* Do in-place cloning of all of the key columns */
304             return execRow.getClone(indexedCols);
305         }
306         else
307         {
308             return execRow;
309         }
310     }
311
312     /**
313       * Run the check constraints against the current row. Raise an error if
314       * a check constraint is violated.
315       *
316       * @exception StandardException thrown on error
317       */

318     private void evaluateCheckConstraints()
319         throws StandardException
320     {
321         if (checkGM != null)
322         {
323
324             // Evaluate the check constraints. The expression evaluation
325             // will throw an exception if there is a violation, so there
326             // is no need to check the result of the expression.
327             checkGM.invoke(activation);
328         }
329     }
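    // Illustrative sketch (editor's addition, not part of the original source):
    // the generated checkGM method evaluates the table's CHECK constraints.
    // For a hypothetical table such as
    //
    //     CREATE TABLE emp (id INT NOT NULL, salary INT CHECK (salary >= 0));
    //
    // an INSERT supplying a negative salary causes checkGM.invoke(activation)
    // to raise a StandardException for the constraint violation, which is why
    // the return value of the expression is never examined here.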
330
331     /*
332      * class interface
333      *
334      */

335     /**
336      *
337      * @exception StandardException Thrown on error
338      */

339     InsertResultSet(NoPutResultSet source,
340                            GeneratedMethod checkGM,
341                            Activation activation)
342         throws StandardException
343     {
344         super(activation);
345         sourceResultSet = source;
346         constants = (InsertConstantAction) constantAction;
347         this.checkGM = checkGM;
348         heapConglom = constants.conglomId;
349
350         tc = activation.getTransactionController();
351         fkInfoArray = constants.getFKInfo( lcc.getExecutionContext() );
352         triggerInfo = constants.getTriggerInfo(lcc.getExecutionContext());
353         
354         /*
355         ** If we have a before statement trigger, then
356         ** we cannot check constraints inline.
357         */

358         hasBeforeStatementTrigger = (triggerInfo != null) ?
359                 triggerInfo.hasTrigger(true, false) :
360                 false;
361
362         hasBeforeRowTrigger = (triggerInfo != null) ?
363                 triggerInfo.hasTrigger(true, true) :
364                 false;
365
366         resultDescription = sourceResultSet.getResultDescription();
367
368         // Is this a bulkInsert or regular insert?
369         String insertMode = constants.getProperty("insertMode");
370
371                 RowLocation[] rla;
372
373         if ((rla = constants.getAutoincRowLocation()) != null)
374         {
375             aiCache =
376                 new NumberDataValue[rla.length];
377             for (int i = 0; i < resultDescription.getColumnCount(); i++)
378             {
379                 if (rla[i] == null)
380                     continue;
381                 ResultColumnDescriptor rcd =
382                     resultDescription.getColumnDescriptor(i + 1);
383                 aiCache[i] = (NumberDataValue)rcd.getType().getNull();
384             }
385         }
386
387         if (insertMode != null)
388         {
389             if (StringUtil.SQLEqualsIgnoreCase(insertMode,"BULKINSERT"))
390             {
391                 userSpecifiedBulkInsert = true;
392             }
393             else if (StringUtil.SQLEqualsIgnoreCase(insertMode,"REPLACE"))
394             {
395                 userSpecifiedBulkInsert = true;
396                 bulkInsertReplace = true;
397                 bulkInsert = true;
398
399                 /*
400                 ** For now, we don't allow bulk insert replace when
401                 ** there is a trigger.
402                 */

403                 if (triggerInfo != null)
404                 {
405                     TriggerDescriptor td = triggerInfo.getTriggerArray()[0];
406                     throw StandardException.newException(SQLState.LANG_NO_BULK_INSERT_REPLACE_WITH_TRIGGER_DURING_EXECUTION, constants.getTableName(), td.getName());
407                 }
408             }
409         }
410
411         //System.out.println("new InsertResultSet " + sourceResultSet.getClass());
412     }
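    // Illustrative sketch (editor's addition, not part of the original source):
    // the "insertMode" target property tested in the constructor above is
    // normally supplied through a Derby properties comment on the INSERT
    // statement; the exact syntax below is an assumption for illustration:
    //
    //     INSERT INTO target_table --DERBY-PROPERTIES insertMode=bulkInsert
    //     SELECT * FROM staging_table;
    //
    // "bulkInsert" sets userSpecifiedBulkInsert; "replace" additionally sets
    // bulkInsertReplace and is rejected at execution time if the target table
    // has triggers, as the code above shows.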
413     
414     /**
415         @exception StandardException Standard Cloudscape error policy
416     */

417     public void open() throws StandardException
418     {
419         // Remember if this is the 1st execution
420         firstExecute = (rowChanger == null);
421
422         autoincrementGenerated = false;
423
424         dd = lcc.getDataDictionary();
425
426         /*
427         ** verify the auto-generated key columns list (i.e. there are no invalid column
428         ** names or positions). This is done at execution time because for a precompiled
429         ** insert statement, the user can specify different column selections for
430         ** auto-generated keys.
431         */

432         if(activation.getAutoGeneratedKeysResultsetMode())
433         {
434             if (activation.getAutoGeneratedKeysColumnIndexes() != null)
435                 verifyAutoGeneratedColumnsIndexes(activation.getAutoGeneratedKeysColumnIndexes());
436             else if (activation.getAutoGeneratedKeysColumnNames() != null)
437                 verifyAutoGeneratedColumnsNames(activation.getAutoGeneratedKeysColumnNames());
438         }
439         rowCount = 0;
440
441         if (numOpens++ == 0)
442         {
443             sourceResultSet.openCore();
444         }
445         else
446         {
447             sourceResultSet.reopenCore();
448         }
449
450         /* If the user specified bulkInsert (or replace) then we need
451          * to get an exclusive table lock on the table. If it is a
452          * regular bulk insert then we need to check to see if the
453          * table is empty. (If not empty, then we end up doing a row
454          * at a time insert.)
455          */

456         if (userSpecifiedBulkInsert)
457         {
458             if (! bulkInsertReplace)
459             {
460                 bulkInsert = verifyBulkInsert();
461             }
462             else
463             {
464                 getExclusiveTableLock();
465             }
466         }
467
468         if (bulkInsert)
469         {
470             // Notify the source that we are the target
471             sourceResultSet.setTargetResultSet(this);
472             long baseTableConglom = bulkInsertCore(lcc, heapConglom);
473
474             if (hasBeforeStatementTrigger)
475             {
476                 tableScan = getTableScanResultSet(baseTableConglom);
477
478                 // fire BEFORE trigger, do this before checking constraints
479                 triggerActivator.notifyEvent(TriggerEvents.BEFORE_INSERT,
480                                                 (CursorResultSet)null,
481                                                 tableScan);
482             
483                 // if we have a check constraint, we have
484                 // to do it the hard way now before we get
485                 // to our AFTER triggers.
486                 if (checkGM != null)
487                 {
488                     tableScan = getTableScanResultSet(baseTableConglom);
489
490                     try
491                     {
492                         ExecRow currRow = null;
493                         while ((currRow = tableScan.getNextRowCore()) != null)
494                         {
495                             // we have to set the source row so the check constraint
496                             // sees the correct row.
497                             sourceResultSet.setCurrentRow(currRow);
498                             evaluateCheckConstraints();
499                         }
500                     } finally
501                     {
502                         sourceResultSet.clearCurrentRow();
503                     }
504                 }
505             }
506             
507             bulkValidateForeignKeys(tc, lcc.getContextManager());
508     
509             // if we have an AFTER trigger, let 'er rip
510             if ((triggerInfo != null) &&
511                 (triggerInfo.hasTrigger(false, true) ||
512                  triggerInfo.hasTrigger(false, false)))
513             {
514                 triggerActivator.notifyEvent(TriggerEvents.AFTER_INSERT,
515                                         (CursorResultSet)null,
516                                         getTableScanResultSet(baseTableConglom));
517             }
518             bulkInsertPerformed = true;
519         }
520         else
521         {
522             row = getNextRowCore(sourceResultSet);
523             normalInsertCore(lcc, firstExecute);
524         }
525
526         /* Cache query plan text for source, before it gets blown away */
527         if (lcc.getRunTimeStatisticsMode())
528         {
529             /* savedSource nulled after run time statistics generation */
530             savedSource = sourceResultSet;
531         }
532
533         /* autoGeneratedResultset for JDBC3. Nulled after statement execution is over
534         (i.e. after it is saved off in the LocalStatement object) */

535         if (activation.getAutoGeneratedKeysResultsetMode())
536             autoGeneratedKeysResultSet = autoGeneratedKeysRowsHolder.getResultSet();
537         else
538             autoGeneratedKeysResultSet = null;
539
540         cleanUp();
541
542         if (aiCache != null)
543         {
544             Hashtable aiHashtable = new Hashtable();
545             int numColumns = aiCache.length;
546             // this insert updated ai values, store them in some persistent
547             // place so that I can see these values.
548             for (int i = 0; i < numColumns; i++)
549             {
550                 if (aiCache[i] == null)
551                     continue;
552                 aiHashtable.put(AutoincrementCounter.makeIdentity(
553                                   constants.getSchemaName(),
554                                   constants.getTableName(),
555                                   constants.getColumnName(i)),
556                                 new Long(aiCache[i].getLong()));
557             }
558             InternalTriggerExecutionContext itec =
559                 (InternalTriggerExecutionContext)lcc.getTriggerExecutionContext();
560             if (itec == null)
561                 lcc.copyHashtableToAIHT(aiHashtable);
562             else
563                 itec.copyHashtableToAIHT(aiHashtable);
564         }
565
566         endTime = getCurrentTimeMillis();
567     }
568
569     /*
570      * Verify that the auto-generated columns list (by position) has valid
571      * column positions for the table.
572      */

573     private void verifyAutoGeneratedColumnsIndexes(int[] columnIndexes)
574         throws StandardException
575     {
576         int size = columnIndexes.length;
577         TableDescriptor td = dd.getTableDescriptor(constants.targetUUID);
578
579         // all 1-based column ids.
580         for (int i = 0; i < size; i++)
581         {
582             if (td.getColumnDescriptor(columnIndexes[i]) == null)
583                 throw StandardException.newException(SQLState.LANG_COLUMN_POSITION_NOT_FOUND, new Integer(columnIndexes[i]));
584         }
585     }
586
587     /*
588      * If the user didn't provide a column list for auto-generated columns, then only include
589      * columns with auto-generated values in the result set. Those columns would be the ones
590      * with a default value defined.
591      */

592     private int[] generatedColumnPositionsArray()
593         throws StandardException
594     {
595         TableDescriptor td = dd.getTableDescriptor(constants.targetUUID);
596         ColumnDescriptor cd;
597         int size = td.getMaxColumnID();
598
599         int[] generatedColumnPositionsArray = new int[size];
600         int generatedColumnNumbers = 0;
601         for (int i=0; i<size; i++) {
602             generatedColumnPositionsArray[i] = -1;
603         }
604
605         for (int i=0; i<size; i++) {
606             cd = td.getColumnDescriptor(i+1);
607             if (cd.isAutoincrement()) { //if the column has auto-increment value
608                 generatedColumnNumbers++;
609                 generatedColumnPositionsArray[i] = i+1;
610             } else if (cd.getDefaultValue() != null || cd.getDefaultInfo() != null) {//default value
611                 generatedColumnNumbers++;
612                 generatedColumnPositionsArray[i] = i+1;
613             }
614         }
615         int[] returnGeneratedColumnPositionsArray = new int[generatedColumnNumbers];
616
617         for (int i=0, j=0; i<size; i++) {
618             if (generatedColumnPositionsArray[i] != -1)
619                 returnGeneratedColumnPositionsArray[j++] = generatedColumnPositionsArray[i];
620         }
621
622         return returnGeneratedColumnPositionsArray;
623     }
624
625     /*
626      * Remove duplicate columns from the array. Then use this array to generate a subset
627      * of the insert result set to be returned for the JDBC 3.0 getGeneratedKeys() call.
628      */

629     private int[] uniqueColumnPositionArray(int[] columnIndexes)
630         throws StandardException
631     {
632         int size = columnIndexes.length;
633         TableDescriptor td = dd.getTableDescriptor(constants.targetUUID);
634
635         //create an array of integers (the array size = number of columns in the table)
636         // valid column positions are 1...getMaxColumnID()
637         int[] uniqueColumnIndexes = new int[td.getMaxColumnID()];
638
639         int uniqueColumnNumbers = 0;
640
641
642         //At the end of the following loop, the uniqueColumnIndexes elements will not be 0 for user
643         //selected auto-generated columns.
644         for (int i=0; i<size; i++) {
645             if (uniqueColumnIndexes[columnIndexes[i] - 1] == 0) {
646                 uniqueColumnNumbers++;
647                 uniqueColumnIndexes[columnIndexes[i] - 1] = columnIndexes[i];
648             }
649         }
650         int[] returnUniqueColumnIndexes = new int[uniqueColumnNumbers];
651
652         //return just the column positions which are not marked 0 in the uniqueColumnIndexes array
653         for (int i=0, j=0; i<uniqueColumnIndexes.length; i++) {
654             if (uniqueColumnIndexes[i] != 0)
655                 returnUniqueColumnIndexes[j++] = uniqueColumnIndexes[i];
656         }
657
658         return returnUniqueColumnIndexes;
659     }
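    // Worked example (editor's addition, not part of the original source):
    // for a four-column table and columnIndexes = {3, 1, 3}, the first loop
    // leaves uniqueColumnIndexes = {1, 0, 3, 0}, so the method returns {1, 3}:
    // the distinct column positions, in ascending order, that will make up the
    // auto-generated keys result set.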
660
661     /**
662      * Verify that the auto-generated columns list (by name) has valid
663      * column names for the table. If all the column names are valid,
664      * convert the column names array to the corresponding column positions array.
665      * Save that column positions array in the activation. We do this to simplify the
666      * rest of the logic (it only has to deal with column positions hereafter).
667      *
668      * @exception StandardException Thrown on error if there is an invalid column
669      * name in the list.
670      */

671     private void verifyAutoGeneratedColumnsNames(String[] columnNames)
672         throws StandardException
673     {
674         int size = columnNames.length;
675         int columnPositions[] = new int[size];
676
677         TableDescriptor td = dd.getTableDescriptor(constants.targetUUID);
678         ColumnDescriptor cd;
679
680         for (int i = 0; i < size; i++)
681         {
682             if (columnNames[i] == null)
683                 throw StandardException.newException(SQLState.LANG_COLUMN_NAME_NOT_FOUND, columnNames[i]);
684             cd = td.getColumnDescriptor(columnNames[i]);
685             if (cd == null)
686                 throw StandardException.newException(SQLState.LANG_COLUMN_NAME_NOT_FOUND, columnNames[i]);
687             else
688                 columnPositions[i] = cd.getPosition();
689         }
690         activation.setAutoGeneratedKeysResultsetInfo(columnPositions, null);
691     }
692
693     /**
694      * @see ResultSet#getAutoGeneratedKeysResultset
695      */

696     public ResultSet getAutoGeneratedKeysResultset()
697     {
698         return autoGeneratedKeysResultSet;
699     }
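    // Illustrative sketch (editor's addition, not part of the original source):
    // the result set built here is what an application retrieves through the
    // standard JDBC 3.0 auto-generated keys API, for example:
    //
    //     Statement s = conn.createStatement();
    //     s.executeUpdate("INSERT INTO emp(name) VALUES ('Ann')",
    //                     Statement.RETURN_GENERATED_KEYS);
    //     java.sql.ResultSet keys = s.getGeneratedKeys();
    //     while (keys.next()) { System.out.println(keys.getLong(1)); }
    //
    // Passing an explicit int[] or String[] column list instead of
    // RETURN_GENERATED_KEYS is what drives the verifyAutoGeneratedColumnsIndexes
    // and verifyAutoGeneratedColumnsNames checks performed in open().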
700
701
702     /**
703      * getSetAutoincrementValue will get the autoincrement value of the
704      * columnPosition specified for the target table. If increment is
705      * non-zero we will also update the autoincrement value.
706      *
707      * @param columnPosition position of the column in the table (1-based)
708      * @param increment amount of increment.
709      *
710      * @exception StandardException if anything goes wrong.
711      */

712     public NumberDataValue
713         getSetAutoincrementValue(int columnPosition, long increment)
714         throws StandardException
715     {
716         long startValue = 0;
717                 NumberDataValue dvd;
718         int index = columnPosition - 1; // all our indices are 0 based.
719

720         /* As in DB2, only for single row insert: insert into t1(c1) values (..) do
721          * we return the correct most recently generated identity column value. For
722          * multiple row insert, or insert with sub-select, the return value is non-
723          * deterministic, and is the previous return value of the IDENTITY_VAL_LOCAL
724          * function, before the insert statement. Also, DB2 can have at most 1 identity
725          * column per table. The return value won't be affected either if Cloudscape
726          * table has more than one identity columns.
727          */

728         setIdentity = (! autoincrementGenerated) && isSourceRowResultSet();
729         autoincrementGenerated = true;
730
731         if (bulkInsert)
732         {
733             ColumnDescriptor cd = td.getColumnDescriptor(columnPosition);
734             long ret;
735
736             // for bulk insert we have the table descriptor
737             // System.out.println("in bulk insert");
738             if (aiCache[index].isNull())
739             {
740                 if (bulkInsertReplace)
741                 {
742                     startValue = cd.getAutoincStart();
743                 }
744                 else
745                 {
746                     dvd = dd.getSetAutoincrementValue(
747                             constants.autoincRowLocation[index],
748                             tc, false, aiCache[index], true);
749                     startValue = dvd.getLong();
750                 }
751                 lcc.autoincrementCreateCounter(td.getSchemaName(),
752                                                td.getName(),
753                                                cd.getColumnName(),
754                                                new Long(startValue),
755                                                increment,
756                                                columnPosition);
757             
758             }
759             ret = lcc.nextAutoincrementValue(td.getSchemaName(),
760                                              td.getName(),
761                                              cd.getColumnName());
762             aiCache[columnPosition - 1].setValue(ret);
763         }
764
765         else
766         {
767             NumberDataValue newValue;
768             TransactionController nestedTC = null, tcToUse = tc;
769
770             try
771             {
772                 nestedTC = tc.startNestedUserTransaction(false);
773                 tcToUse = nestedTC;
774             }
775
776             catch (StandardException se)
777             {
778                 // If I cannot start a Nested User Transaction use the parent
779                 // transaction to do all the work.
780                 tcToUse = tc;
781             }
782
783             try
784             {
785                 /* If tcToUse == tc, then we are using parent xaction-- this
786                    can happen if for some reason we couldn't start a nested
787                    transaction
788                 */

789                 newValue = dd.getSetAutoincrementValue(
790                            constants.autoincRowLocation[index],
791                            tcToUse, true, aiCache[index], (tcToUse == tc));
792             }
793
794             catch (StandardException se)
795             {
796                 if (tcToUse == tc)
797                 {
798                     /* we're using the parent xaction and we've timed out; just
799                        throw an error and exit.
800                     */

801                     throw se;
802                 }
803
804                 if (se.getMessageId().equals(SQLState.LOCK_TIMEOUT))
805                 {
806                     // if we couldn't do this with a nested xaction, retry with
807                     // parent -- we need to wait this time!
808                     newValue = dd.getSetAutoincrementValue(
809                                     constants.autoincRowLocation[index],
810                                     tc, true, aiCache[index], true);
811                 }
812                 else if (se.getMessageId().equals(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE))
813                 {
814                     // if we got an overflow error, throw a more meaningful
815                     // error message
816                     throw StandardException.newException(
817                                                  SQLState.LANG_AI_OVERFLOW,
818                                                  se,
819                                                  constants.getTableName(),
820                                                  constants.getColumnName(index));
821                 }
822                 else throw se;
823             }
824             finally
825             {
826                 // no matter what, commit the nested transaction; if something
827                 // bad happened in the child xaction let's not abort the parent
828                 // here.
829                 if (nestedTC != null)
830                 {
831                     nestedTC.commit();
832                     nestedTC.destroy();
833                 }
834             }
835             aiCache[index] = newValue;
836             if (setIdentity)
837                 identityVal = newValue.getLong();
838         }
839
840         return aiCache[index];
841         
842     }
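    // Illustrative sketch (editor's addition, not part of the original source):
    // the setIdentity/identityVal bookkeeping above backs the
    // IDENTITY_VAL_LOCAL() function. Assuming a table such as
    //
    //     CREATE TABLE t1 (c1 INT GENERATED ALWAYS AS IDENTITY, c2 INT);
    //
    // a single-row "INSERT INTO t1(c2) VALUES (10)" updates the value returned
    // by "VALUES IDENTITY_VAL_LOCAL()", while a multi-row insert or
    // INSERT ... SELECT leaves the function's previous return value unchanged,
    // matching the DB2-compatible behaviour described in the comment at the
    // top of this method.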
843
844     // Is sourceResultSet a RowResultSet (values clause)?
845     private boolean isSourceRowResultSet()
846     {
847         boolean isRow = false;
848         if (sourceResultSet instanceof NormalizeResultSet)
849             isRow = (((NormalizeResultSet) sourceResultSet).source instanceof RowResultSet);
850         return isRow;
851     }
852
853     // checks if source result set is a RowResultSet type.
854     private boolean isSingleRowResultSet()
855     {
856         boolean isRow = false;
857         
858         if (sourceResultSet instanceof RowResultSet)
859             isRow = true;
860         else if (sourceResultSet instanceof NormalizeResultSet)
861             isRow = (((NormalizeResultSet) sourceResultSet).source instanceof RowResultSet);
862         
863         return isRow;
864     }
865     
866     // Do the work for a "normal" insert
867     private void normalInsertCore(LanguageConnectionContext lcc, boolean firstExecute)
868         throws StandardException
869     {
870         boolean setUserIdentity = constants.hasAutoincrement() && isSingleRowResultSet();
871         boolean firstDeferredRow = true;
872         ExecRow deferredRowBuffer = null;
873                 long user_autoinc=0;
874                         
875         /* Get or re-use the row changer.
876          * NOTE: We need to set ourself as the top result set
877          * if this is not the 1st execution. (Done in constructor
878          * for 1st execution.)
879          */

880         if (firstExecute)
881         {
882             rowChanger = lcc.getLanguageConnectionFactory().getExecutionFactory()
883                              .getRowChanger(
884                                      heapConglom,
885                                      constants.heapSCOCI,
886                                      heapDCOCI,
887                                      constants.irgs,
888                                      constants.indexCIDS,
889                                      constants.indexSCOCIs,
890                                      indexDCOCIs,
891                                      0, // number of columns in partial row meaningless for insert
892                                      tc,
893                                      null, //Changed column ids
894                                      constants.getStreamStorableHeapColIds(),
895                                      activation
896                                    );
897             rowChanger.setIndexNames(constants.indexNames);
898         }
899         else
900         {
901             lcc.getStatementContext().setTopResultSet(this, subqueryTrackingArray);
902         }
903
904         /* decode lock mode for the execution isolation level */
905         int lockMode = UpdateResultSet.decodeLockMode(lcc, constants.lockMode);
906
907         rowChanger.open(lockMode);
908
909         /* The source does not know whether or not we are doing a
910          * deferred mode insert. If we are, then we must clear the
911          * index scan info from the activation so that the row changer
912          * does not re-use that information (which won't be valid for
913          * a deferred mode insert).
914          */

915         if (constants.deferred)
916         {
917             activation.clearIndexScanInfo();
918         }
919
920         if (fkInfoArray != null)
921         {
922             if (fkChecker == null)
923             {
924                 fkChecker = new RISetChecker(tc, fkInfoArray);
925             }
926             else
927             {
928                 fkChecker.reopen();
929             }
930         }
931
932         if (firstExecute && constants.deferred)
933         {
934             Properties properties = new Properties();
935
936             // Get the properties on the old heap
937             rowChanger.getHeapConglomerateController().getInternalTablePropertySet(properties);
938
939             /*
940             ** If deferred we save a copy of the entire row.
941             */

942             rowHolder = new TemporaryRowHolderImpl(activation, properties,
943                                                    resultDescription);
944             rowChanger.setRowHolder(rowHolder);
945         }
946
947         int[] columnIndexes = null;
948         if (firstExecute && activation.getAutoGeneratedKeysResultsetMode())
949         {
950             ResultDescription rd;
951             Properties properties = new Properties();
952             columnIndexes = activation.getAutoGeneratedKeysColumnIndexes();
953
954             // Get the properties on the old heap
955             rowChanger.getHeapConglomerateController().getInternalTablePropertySet(properties);
956
957             if (columnIndexes != null) { // use user provided column positions array
958                 columnIndexes = uniqueColumnPositionArray(columnIndexes);
959             } else { // prepare array of auto-generated keys for the table since user didn't provide any
960                 columnIndexes = generatedColumnPositionsArray();
961             }
962
963             rd = lcc.getLanguageFactory().getResultDescription(resultDescription,columnIndexes);
964             autoGeneratedKeysRowsHolder =
965                 new TemporaryRowHolderImpl(activation, properties, rd);
966         }
967
968
969         while ( row != null )
970         {
971             if (activation.getAutoGeneratedKeysResultsetMode())
972                 autoGeneratedKeysRowsHolder.insert(getCompactRow(row, columnIndexes));
973
974             /*
975             ** If we're doing a deferred insert, insert into the temporary
976             ** conglomerate. Otherwise, insert directly into the permanent
977             ** conglomerates using the rowChanger.
978             */

979             if (constants.deferred)
980             {
981                     rowHolder.insert(row);
982             }
983             else
984             {
985                 // Evaluate any check constraints on the row
986                 evaluateCheckConstraints();
987
988                 if (fkChecker != null)
989                 {
990                     fkChecker.doFKCheck(row);
991                 }
992
993                 // Objectify any streaming columns that are indexed.
994                 if (constants.irgs.length > 0)
995                 {
996                     DataValueDescriptor[] rowArray = row.getRowArray();
997                     for (int i = 0; i < rowArray.length; i++)
998                     {
999                         //System.out.println("checking " + i);
1000                        if (! constants.indexedCols[i])
1001                        {
1002                            continue;
1003                        }
1004
1005
1006                        if (rowArray[i] instanceof StreamStorable)
1007                            rowArray[i].getObject();
1008                    }
1009                }
1010                rowChanger.insertRow(row);
1011            }
1012
1013            rowCount++;
1014            
1015            if(setUserIdentity )
1016            {
1017                        dd = lcc.getDataDictionary();
1018                        td = dd.getTableDescriptor(constants.targetUUID);
1019                       
1020                        int maxColumns = td.getMaxColumnID();
1021                        int col;
1022                        
1023                        for(col=1;col<=maxColumns;col++)
1024                        {
1025                            ColumnDescriptor cd = td.getColumnDescriptor(col);
1026                            if(cd.isAutoincrement())
1027                            {
1028                                break;
1029                            }
1030                        }
1031                        
1032                        if(col <= maxColumns)
1033                        {
1034                            DataValueDescriptor dvd = row.cloneColumn(col);
1035                            user_autoinc = dvd.getLong();
1036                        }
1037             }
1038
1039            // No need to do a next on a single row source
1040            if (constants.singleRowSource)
1041         {
1042            row = null;
1043         }
1044         else
1045         {
1046        row = getNextRowCore(sourceResultSet);
1047         }
1048        }
1049
1050        /*
1051        ** If it's a deferred insert, scan the temporary conglomerate and
1052        ** insert the rows into the permanent conglomerates using rowChanger.
1053        */

1054        if (constants.deferred)
1055        {
1056            if (triggerInfo != null)
1057            {
1058                Vector v = null;
1059                if (aiCache != null)
1060                {
1061                    v = new Vector();
1062                    for (int i = 0; i < aiCache.length; i++)
1063                    {
1064                        String s, t, c;
1065                        if (aiCache[i] == null)
1066                            continue;
1067                    
1068                        Long initialValue =
1069                            lcc.lastAutoincrementValue(
1070                                            (s = constants.getSchemaName()),
1071                                            (t = constants.getTableName()),
1072                                            (c = constants.getColumnName(i)));
1073
1074
1075                        AutoincrementCounter aic =
1076                            new AutoincrementCounter(
1077                                             initialValue,
1078                                             constants.getAutoincIncrement(i),
1079                                             aiCache[i].getLong(),
1080                                             s, t, c, i + 1);
1081                        v.addElement(aic);
1082                    }
1083                }
1084
1085                if (triggerActivator == null)
1086                {
1087                    triggerActivator = new TriggerEventActivator(lcc,
1088                                        tc,
1089                                        constants.targetUUID,
1090                                        triggerInfo,
1091                                        TriggerExecutionContext.INSERT_EVENT,
1092                                        activation,
1093                                        v);
1094                }
1095                else
1096                {
1097                    triggerActivator.reopen();
1098                }
1099
1100                // fire BEFORE trigger, do this before checking constraints
1101                triggerActivator.notifyEvent(TriggerEvents.BEFORE_INSERT,
1102                                                (CursorResultSet)null,
1103                                                rowHolder.getResultSet());
1104            }
1105
1106            CursorResultSet rs = rowHolder.getResultSet();
1107            try
1108            {
1109                rs.open();
1110                while ((deferredRowBuffer = rs.getNextRow()) != null)
1111                {
1112                    // we have to set the source row so the check constraint
1113                    // sees the correct row.
1114                    sourceResultSet.setCurrentRow(deferredRowBuffer);
1115                    evaluateCheckConstraints();
1116                    rowChanger.insertRow(deferredRowBuffer);
1117                }
1118            } finally
1119            {
1120                sourceResultSet.clearCurrentRow();
1121                rs.close();
1122            }
1123            
1124            if (fkChecker != null)
1125            {
1126                /*
1127                ** Second scan to make sure all the foreign key
1128                ** constraints are ok. We have to do this after
1129                ** we have completed the inserts in case of self
1130                ** referencing constraints.
1131                */

1132                rs = rowHolder.getResultSet();
1133                try
1134                {
1135                    rs.open();
1136                    while ((deferredRowBuffer = rs.getNextRow()) != null)
1137                    {
1138                        fkChecker.doFKCheck(deferredRowBuffer);
1139                    }
1140                } finally
1141                {
1142                    rs.close();
1143                }
1144            }
1145
1146            // fire AFTER trigger
1147            if (triggerActivator != null)
1148            {
1149                triggerActivator.notifyEvent(TriggerEvents.AFTER_INSERT,
1150                                        (CursorResultSet)null,
1151                                        rowHolder.getResultSet());
1152            }
1153        }
1154
1155        if (rowHolder != null)
1156        {
1157            rowHolder.close();
1158            // rowHolder kept across opens
1159        }
1160        if (fkChecker != null)
1161        {
1162            fkChecker.close();
1163            fkChecker = null;
1164        }
1165        if (setIdentity)
1166            lcc.setIdentityValue(identityVal);
1167                /*
1168                 * find the value of the identity column from the user inserted value
1169                 * and do a lcc.setIdentityValue(<user_value>);
1170                 */

1171                else if(setUserIdentity )
1172                {
1173                        lcc.setIdentityValue(user_autoinc);
1174                }
1175 }
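    // Illustrative note (editor's addition, not part of the original source):
    // deferred mode, as handled above, buffers every source row in the
    // TemporaryRowHolderImpl and only then re-reads them to drive the
    // rowChanger, check constraints, foreign keys and triggers. A statement
    // that reads the table it inserts into, e.g. (hypothetical)
    //
    //     INSERT INTO t SELECT * FROM t;
    //
    // is the kind of case the compiler is assumed to mark as deferred, so that
    // newly inserted rows are never picked up again by the source scan.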
1176
1177    /*
1178     * Take the input row and return a new compact ExecRow
1179     * using the column positions provided in columnIndexes.
1180     * Copies references, no cloning.
1181     */

1182    private ExecRow getCompactRow
1183    (
1184        ExecRow inputRow,
1185        int[] columnIndexes
1186    )
1187        throws StandardException
1188    {
1189        ExecRow outRow;
1190        int numInputCols = inputRow.nColumns();
1191
1192        if (columnIndexes == null)
1193        {
1194            outRow = new ValueRow(numInputCols);
1195            Object[] src = inputRow.getRowArray();
1196            Object[] dst = outRow.getRowArray();
1197            System.arraycopy(src, 0, dst, 0, src.length);
1198            return outRow;
1199        }
1200
1201        int numOutputCols = columnIndexes.length;
1202
1203        outRow = new ValueRow(numOutputCols);
1204        for (int i = 0; i < numOutputCols; i++)
1205        {
1206            outRow.setColumn(i+1,
1207                inputRow.getColumn(columnIndexes[i]));
1208        }
1209
1210        return outRow;
1211    }
1212
1213    // Do the work for a bulk insert
1214    private long bulkInsertCore(LanguageConnectionContext lcc,
1215                                long oldHeapConglom)
1216        throws StandardException
1217    {
1218        fullTemplate = constants.getEmptyHeapRow(lcc);
1219        bulkHeapCC = tc.openCompiledConglomerate(
1220                                false,
1221                                TransactionController.OPENMODE_FORUPDATE,
1222                                TransactionController.MODE_TABLE,
1223                                TransactionController.ISOLATION_SERIALIZABLE,
1224                                constants.heapSCOCI,
1225                                heapDCOCI);
1226
1227        long newHeapConglom;
1228
1229        Properties properties = new Properties();
1230
1231        // Get the properties on the old heap
1232        bulkHeapCC.getInternalTablePropertySet(properties);
1233
1234        if (triggerInfo != null)
1235        {
1236            triggerActivator = new TriggerEventActivator(lcc,
1237                                        tc,
1238                                        constants.targetUUID,
1239                                        triggerInfo,
1240                                        TriggerExecutionContext.INSERT_EVENT,
1241                                        activation, null);
1242        }
1243
1244        /*
1245        ** If we have a before row trigger, then we
1246        ** are going to use a row holder to pass to our
1247        ** trigger.
1248        */

1249        if (hasBeforeRowTrigger && rowHolder != null)
1250        {
1251            rowHolder =
1252                new TemporaryRowHolderImpl(activation, properties,
1253                                           resultDescription);
1254        }
1255
1256        // Add any new properties or change the values of any existing properties
1257        Properties targetProperties = constants.getTargetProperties();
1258        Enumeration key = targetProperties.keys();
1259        while (key.hasMoreElements())
1260        {
1261            String keyValue = (String) key.nextElement();
1262            properties.put(keyValue, targetProperties.getProperty(keyValue));
1263        }
1264
1265        // Are there indexes to be updated?
1266        if (constants.irgs.length > 0)
1267        {
1268            // Tell source whether or not we need the RIDs back
1269            sourceResultSet.setNeedsRowLocation(true);
1270        }
1271
1272        dd = lcc.getDataDictionary();
1273        td = dd.getTableDescriptor(constants.targetUUID);
1274
1275        /* Do the bulk insert - only okay to reuse the
1276         * same conglomerate if bulkInsert.
1277         */

1278        long[] loadedRowCount = new long[1];
1279        if (bulkInsertReplace)
1280        {
1281            newHeapConglom = tc.createAndLoadConglomerate(
1282                                        "heap",
1283                                        fullTemplate.getRowArray(),
1284                                        null, //column sort order - not required for heap
1285                                        properties,
1286                                        TransactionController.IS_DEFAULT,
1287                                        sourceResultSet,
1288                                        loadedRowCount);
1289        }
1290        else
1291        {
1292            newHeapConglom = tc.recreateAndLoadConglomerate(
1293                                        "heap",
1294                                        false,
1295                                        fullTemplate.getRowArray(),
1296                                        null, //column sort order - not required for heap
1297                                        properties,
1298                                        TransactionController.IS_DEFAULT,
1299                                        oldHeapConglom,
1300                                        sourceResultSet,
1301                                        loadedRowCount);
1302        }
1303
1304        /* Nothing else to do if we get back the same conglomerate number.
1305         * (In 2.0 this means that 0 rows were inserted.)
1306         */

1307        if (newHeapConglom == oldHeapConglom)
1308        {
1309            return oldHeapConglom;
1310        }
1311
1312        // Find out how many rows were inserted
1313        rowCount = (int) loadedRowCount[0];
1314
1315        // Set the "estimated" row count
1316        setEstimatedRowCount(newHeapConglom);
1317
1318        /*
1319        ** Inform the data dictionary that we are about to write to it.
1320        ** There are several calls to data dictionary "get" methods here
1321        ** that might be done in "read" mode in the data dictionary, but
1322        ** it seemed safer to do this whole operation in "write" mode.
1323        **
1324        ** We tell the data dictionary we're done writing at the end of
1325        ** the transaction.
1326        */

1327        dd.startWriting(lcc);
1328
1329        lcc.autoincrementFlushCache(constants.targetUUID);
1330
1331        // invalidate any prepared statements that
1332        // depended on this table (including this one)
1333        DependencyManager dm = dd.getDependencyManager();
1334
1335        dm.invalidateFor(td, DependencyManager.BULK_INSERT, lcc);
1336
1337        
1338        // Update all indexes
1339        if (constants.irgs.length > 0)
1340        {
1341            updateAllIndexes(newHeapConglom, constants, td, dd, fullTemplate);
1342        }
1343
1344        // Drop the old conglomerate
1345        bulkHeapCC.close();
1346        bulkHeapCC = null;
1347
1348        /* Update the DataDictionary
1349         * RESOLVE - this will change in 1.4 because we will get
1350         * back the same conglomerate number
1351         */

1352        // Get the ConglomerateDescriptor for the heap
1353        ConglomerateDescriptor cd = td.getConglomerateDescriptor(oldHeapConglom);
1354
1355        // Update sys.sysconglomerates with new conglomerate #
1356        dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);
1357        tc.dropConglomerate(oldHeapConglom);
1358        // END RESOLVE
1359

1360        return newHeapConglom;
1361    }
1362
1363    /*
1364    ** Bulk Referential Integrity Checker
1365    */

1366    private void bulkValidateForeignKeys(TransactionController tc, ContextManager cm)
1367        throws StandardException
1368    {
1369        FKInfo fkInfo;
1370
1371        /*
1372        ** If there are no foreign keys, then nothing to worry
1373        ** about.
1374        ** With bulk insert replace, we still need to verify
1375        ** all non-self referencing foreign keys when
1376        ** there are no rows inserted into the table.
1377        */

1378        if ((indexRows == null && !bulkInsertReplace) ||
1379            fkInfoArray == null)
1380        {
1381            return;
1382        }
1383
1384        for (int i = 0; i < fkInfoArray.length; i++)
1385        {
1386            fkInfo = fkInfoArray[i];
1387
1388            /* With regular bulk insert, we only need to check the
1389             * foreign keys in the table we inserted into. We need
1390             * to get the new conglomerate #s for the foreign keys.
1391             *
1392             * With bulk insert replace, we need to check both the
1393             * foreign keys in the table as well as any foreign keys
1394             * on other tables referencing the table we inserted into.
1395             * If the foreign key is self-referencing then we need to
1396             * get the new conglomerate #, otherwise the conglomerate
1397             * # is the same as the compile time conglomerate #.
1398             * If the foreign key is self-referencing then we need to
1399             * get the new conglomerate # for the primary key as it
1400             * has changed. However, if the foreign key is not self-referencing
1401             * then we only need to get the new conglomerate # for
1402             * the primary key if the primary key is on the table being
1403             * inserted into.
1404             */

1405            if (bulkInsertReplace)
1406            {
1407                for (int index = 0; index < fkInfo.fkConglomNumbers.length; index++)
1408                {
1409                    /* No need to check foreign key if it is self referencing
1410                     * and there were no rows inserted on the replace, as both
1411                     * indexes will be empty.
1412                     */

1413                    if (fkInfo.fkIsSelfReferencing[index] && indexRows == null)
1414                    {
1415                        continue;
1416                    }
1417
1418                    long pkConglom;
1419                    long fkConglom;
1420
1421                    if (fkInfo.fkIsSelfReferencing[index])
1422                    {
1423                        /* Self-referencing foreign key. Both conglomerate
1424                         * #s have changed.
1425                         */

1426                        pkConglom = ((Long)indexConversionTable.get(
1427                                    new Long(fkInfo.refConglomNumber))).longValue();
1428                        fkConglom = ((Long)indexConversionTable.get(
1429                                        new Long(fkInfo.fkConglomNumbers[index]))).longValue();
1430                    }
1431                    else
1432                    {
1433                        /* Non-self referencing foreign key. At this point we
1434                         * don't know if the primary key or the foreign key is
1435                         * on this table. So, for each one, we look to see
1436                         * if the old conglomerate # is in the conversion table.
1437                         * If so, then we get the new conglomerate #, otherwise
1438                         * we use the compile time conglomerate #. This
1439                         * is very simple, though not very elegant.
1440                         */

1441                        Long pkConglomLong = (Long)indexConversionTable.get(
1442                                                new Long(fkInfo.refConglomNumber));
1443                        Long fkConglomLong = (Long)indexConversionTable.get(
1444                                        new Long(fkInfo.fkConglomNumbers[index]));
1445                        if (pkConglomLong == null)
1446                        {
1447                            pkConglom = fkInfo.refConglomNumber;
1448                        }
1449                        else
1450                        {
1451                            pkConglom = pkConglomLong.longValue();
1452                        }
1453                        if (fkConglomLong == null)
1454                        {
1455                            fkConglom = fkInfo.fkConglomNumbers[index];
1456                        }
1457                        else
1458                        {
1459                            fkConglom = fkConglomLong.longValue();
1460                        }
1461                    }
1462                    bulkValidateForeignKeysCore(
1463                            tc, cm, fkInfoArray[i], fkConglom, pkConglom,
1464                            fkInfo.fkConstraintNames[index]);
1465                }
1466            }
1467            else
1468            {
1469                /*
1470                ** We have a FKInfo for each foreign key we are
1471                ** checking. Note that there are no primary key
1472                ** checks on insert, so we can always reference
1473                ** element[0] in the current FKInfo structure.
1474                */

1475                if (SanityManager.DEBUG)
1476                {
1477                    SanityManager.ASSERT(fkInfo.type == FKInfo.FOREIGN_KEY,
1478                        "error, expected to only check foreign keys on insert");
1479                }
1480                Long fkConglom = (Long)indexConversionTable.get(
1481                                        new Long(fkInfo.fkConglomNumbers[0]));
1482                bulkValidateForeignKeysCore(
1483                        tc, cm, fkInfoArray[i], fkConglom.longValue(),
1484                        fkInfo.refConglomNumber, fkInfo.fkConstraintNames[0]);
1485            }
1486        }
1487    }
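    // Illustrative sketch, not part of the original source: the non-self-referencing
    // branch above resolves a conglomerate number by first consulting
    // indexConversionTable (populated when the indexes are rebuilt) and falling back
    // to the compile-time number when no entry exists. A hypothetical helper with
    // the same behavior:
    private long toCurrentConglomSketch(long compileTimeConglom)
    {
        // null means this index was not rebuilt, so the compile-time number still applies
        Long newConglom = (Long) indexConversionTable.get(new Long(compileTimeConglom));
        return (newConglom == null) ? compileTimeConglom : newConglom.longValue();
    }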
1488
1489    private void bulkValidateForeignKeysCore(
1490                        TransactionController tc, ContextManager cm,
1491                        FKInfo fkInfo, long fkConglom, long pkConglom,
1492                        String fkConstraintName)
1493        throws StandardException
1494    {
1495        ExecRow template;
1496        GroupFetchScanController refScan = null;
1497        GroupFetchScanController fkScan = null;
1498
1499        try
1500        {
1501
1502                template = makeIndexTemplate(fkInfo, fullTemplate, cm);
1503
1504                /*
1505                ** The indexes have been dropped and recreated, so
1506                ** we need to get the new index conglomerate number.
1507                */

1508                fkScan =
1509                    tc.openGroupFetchScan(
1510                        fkConglom,
1511                        false, // hold
1512                        0, // read only
1513                        tc.MODE_TABLE, // doesn't matter,
1514                                       // already locked
1515                        tc.ISOLATION_READ_COMMITTED, // doesn't matter,
1516                                                     // already locked
1517                        (FormatableBitSet)null, // retrieve all fields
1518                        (DataValueDescriptor[])null, // startKeyValue
1519                        ScanController.GE, // startSearchOp
1520                        null, // qualifier
1521                        (DataValueDescriptor[])null, // stopKeyValue
1522                        ScanController.GT // stopSearchOp
1523                        );
1524
1525                if (SanityManager.DEBUG)
1526                {
1527                    /*
1528                    ** Bulk insert replace calls this method regardless
1529                    ** of whether or not any rows were inserted because
1530                    ** it has to check any referencing foreign keys
1531                    ** after the replace. Otherwise, we
1532                    ** make sure that we actually have a row in the fk.
1533                    ** If not, we have an error because we thought that
1534                    ** since indexRows != null, we must have gotten some
1535                    ** rows.
1536                    */

1537                    if (! bulkInsertReplace)
1538                    {
1539                        SanityManager.ASSERT(fkScan.next(),
1540                            "No rows in fk index, even though indexRows != null");
1541            
1542                        /*
1543                        ** Crank up the scan again.
1544                        */

1545                        fkScan.reopenScan(
1546                            (DataValueDescriptor[])null, // startKeyValue
1547                            ScanController.GE, // startSearchOp
1548                            null, // qualifier
1549                            (DataValueDescriptor[])null, // stopKeyValue
1550                            ScanController.GT // stopSearchOp
1551                            );
1552                    }
1553                }
1554
1555                /*
1556                ** Open the referenced key scan. Use row locking on
1557                ** the referenced table unless it is self-referencing
1558                ** (in which case we don't need locks)
1559                */

1560                refScan =
1561                    tc.openGroupFetchScan(
1562                        pkConglom,
1563                        false, // hold
1564                        0, // read only
1565                        (fkConglom == pkConglom) ?
1566                                tc.MODE_TABLE :
1567                                tc.MODE_RECORD,
1568                        tc.ISOLATION_READ_COMMITTED, // read committed is
1569                                                     // good enough
1570                        (FormatableBitSet)null, // retrieve all fields
1571                        (DataValueDescriptor[])null, // startKeyValue
1572                        ScanController.GE, // startSearchOp
1573                        null, // qualifier
1574                        (DataValueDescriptor[])null, // stopKeyValue
1575                        ScanController.GT // stopSearchOp
1576                        );
1577
1578                /*
1579                ** Give the scans to the bulk checker to do its
1580                ** magic. It will do a merge on the two indexes.
1581                */

1582                ExecRow firstFailedRow = template.getClone();
1583                RIBulkChecker riChecker = new RIBulkChecker(refScan,
1584                                            fkScan,
1585                                            template,
1586                                            true, // fail on 1st failure
1587                                            (ConglomerateController)null,
1588                                            firstFailedRow);
1589    
1590                int numFailures = riChecker.doCheck();
1591                if (numFailures > 0)
1592                {
1593                    StandardException se = StandardException.newException(SQLState.LANG_FK_VIOLATION, fkConstraintName,
1594                                    fkInfo.tableName,
1595                                    StatementUtil.typeName(fkInfo.stmtType),
1596                                    RowUtil.toString(firstFailedRow, 0, fkInfo.colArray.length - 1));
1597                    throw se;
1598                }
1599        }
1600        finally
1601        {
1602            if (fkScan != null)
1603            {
1604                fkScan.close();
1605                fkScan = null;
1606            }
1607            if (refScan != null)
1608            {
1609                refScan.close();
1610                refScan = null;
1611            }
1612        }
1613    }
1614
1615    /**
1616     * Make a template row with the correct columns.
1617     */

1618    private ExecRow makeIndexTemplate(FKInfo fkInfo, ExecRow fullTemplate, ContextManager cm)
1619        throws StandardException
1620    {
1621        ExecRow newRow = RowUtil.getEmptyIndexRow(fkInfo.colArray.length+1, cm);
1622
1623        DataValueDescriptor[] templateColArray = fullTemplate.getRowArray();
1624        DataValueDescriptor[] newRowColArray = newRow.getRowArray();
1625
1626        int i;
1627        for (i = 0; i < fkInfo.colArray.length; i++)
1628        {
1629            newRowColArray[i] =
1630                (templateColArray[fkInfo.colArray[i] - 1]).getClone();
1631        }
1632
1633        newRowColArray[i] =
1634            (DataValueDescriptor) fkInfo.rowLocation.cloneObject();
1635
1636        return newRow;
1637    }
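    // Illustrative sketch (hypothetical helper): makeIndexTemplate() above lays the
    // row out as clones of the foreign key's base columns followed by one trailing
    // RowLocation column, so the expected template width follows directly from the
    // FKInfo.
    private int fkIndexTemplateWidthSketch(FKInfo fkInfo)
    {
        // one slot per key column plus the RowLocation slot
        return fkInfo.colArray.length + 1;
    }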
1638
1639    /**
1640     * Set up to update all of the indexes on a table when doing a bulk insert
1641     * on an empty table.
1642     *
1643     * @exception StandardException thrown on error
1644     */

1645    private void setUpAllSorts(ExecRow sourceRow,
1646                               RowLocation rl)
1647        throws StandardException
1648    {
1649        int numIndexes = constants.irgs.length;
1650        int numColumns = td.getNumberOfColumns();
1651
1652        ordering = new ColumnOrdering[numIndexes][];
1653        needToDropSort = new boolean[numIndexes];
1654        sortIds = new long[numIndexes];
1655        rowSources = new RowLocationRetRowSource[numIndexes];
1656        // indexedCols is 1-based
1657        indexedCols = new FormatableBitSet(numColumns + 1);
1658
1659
1660        /* For each index, build a single index row and a sorter. */
1661        for (int index = 0; index < numIndexes; index++)
1662        {
1663            // Update the bit map of indexed columns
1664            int[] keyColumns = constants.irgs[index].baseColumnPositions();
1665            for (int i2 = 0; i2 < keyColumns.length; i2++)
1666            {
1667                // indexedCols is 1-based
1668                indexedCols.set(keyColumns[i2]);
1669            }
1670
1671            // create a single index row template for each index
1672            indexRows[index] = constants.irgs[index].getIndexRowTemplate();
1673
1674            // Get an index row based on the base row
1675            // (This call is only necessary here because we need to pass a template to the sorter.)
1676            constants.irgs[index].getIndexRow(sourceRow,
1677                                              rl,
1678                                              indexRows[index],
1679                                              (FormatableBitSet) null);
1680
1681            /* For non-unique indexes, we order by all columns + the RID.
1682             * For unique indexes, we just order by the columns.
1683             * We create a unique index observer for unique indexes
1684             * so that we can catch duplicate key
1685             */

1686            ConglomerateDescriptor cd;
1687            // Get the ConglomerateDescriptor for the index
1688            cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
1689            int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
1690            boolean[] isAscending = constants.irgs[index].isAscending();
1691            int numColumnOrderings;
1692            SortObserver sortObserver = null;
1693            /* We can only reuse the wrappers when doing an
1694             * external sort if there is only 1 index. Otherwise,
1695             * we could get in a situation where 1 sort reuses a
1696             * wrapper that is still in use in another sort.
1697             */

1698            boolean reuseWrappers = (numIndexes == 1);
1699            if (cd.getIndexDescriptor().isUnique())
1700            {
1701                numColumnOrderings = baseColumnPositions.length;
1702                String[] columnNames = getColumnNames(baseColumnPositions);
1703
1704                String indexOrConstraintName = cd.getConglomerateName();
1705                if (cd.isConstraint()) // so, the index is backing up a constraint
1706                {
1707                    ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td,
1708                                                                      cd.getUUID());
1709                    indexOrConstraintName = conDesc.getConstraintName();
1710                }
1711                sortObserver = new UniqueIndexSortObserver(
1712                                                        false, // don't clone rows
1713                                                        cd.isConstraint(),
1714                                                        indexOrConstraintName,
1715                                                        indexRows[index],
1716                                                        reuseWrappers,
1717                                                        td.getName());
1718            }
1719            else
1720            {
1721                numColumnOrderings = baseColumnPositions.length + 1;
1722                sortObserver = new BasicSortObserver(false, false,
1723                                                     indexRows[index],
1724                                                     reuseWrappers);
1725            }
1726            ordering[index] = new ColumnOrdering[numColumnOrderings];
1727            for (int ii =0; ii < isAscending.length; ii++)
1728            {
1729                ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
1730            }
1731            if (numColumnOrderings > isAscending.length)
1732                ordering[index][isAscending.length] = new IndexColumnOrder(isAscending.length);
1733
1734            // create the sorters
1735            sortIds[index] = tc.createSort(
1736                                (Properties)null,
1737                                indexRows[index].getRowArrayClone(),
1738                                ordering[index],
1739                                sortObserver,
1740                                false, // not in order
1741                                (int) sourceResultSet.getEstimatedRowCount(), // est rows
1742                                -1 // est row size, -1 means no idea
1743                                );
1744            needToDropSort[index] = true;
1745        }
1746
1747        sorters = new SortController[numIndexes];
1748
1749        // Open the sorts
1750        for (int index = 0; index < numIndexes; index++)
1751        {
1752            sorters[index] = tc.openSort(sortIds[index]);
1753            needToDropSort[index] = true;
1754        }
1755    }
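    // Illustrative sketch (hypothetical helper): the ordering built in setUpAllSorts()
    // above is the index key columns in their declared direction, plus one extra
    // ascending position for the RID when the index is not unique, so duplicate keys
    // still sort deterministically.
    private ColumnOrdering[] sortOrderingSketch(boolean[] isAscending, boolean unique)
    {
        int numColumnOrderings = unique ? isAscending.length : isAscending.length + 1;
        ColumnOrdering[] ordering = new ColumnOrdering[numColumnOrderings];
        for (int ii = 0; ii < isAscending.length; ii++)
        {
            ordering[ii] = new IndexColumnOrder(ii, isAscending[ii]);
        }
        if (numColumnOrderings > isAscending.length)
        {
            // trailing RID column, always ascending
            ordering[isAscending.length] = new IndexColumnOrder(isAscending.length);
        }
        return ordering;
    }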
1756
1757    /**
1758     * Update all of the indexes on a table when doing a bulk insert
1759     * on an empty table.
1760     *
1761     * @exception StandardException thrown on error
1762     */

1763    private void updateAllIndexes(long newHeapConglom,
1764                                  InsertConstantAction constants,
1765                                  TableDescriptor td,
1766                                  DataDictionary dd,
1767                                  ExecRow fullTemplate)
1768        throws StandardException
1769    {
1770        int numIndexes = constants.irgs.length;
1771
1772        /*
1773        ** If we didn't actually read in any rows, then
1774        ** we don't need to do anything, unless we were
1775        ** doing a replace.
1776        */

1777        if (indexRows == null)
1778        {
1779            if (bulkInsertReplace)
1780            {
1781                emptyIndexes(newHeapConglom, constants, td, dd, fullTemplate);
1782            }
1783            return;
1784        }
1785
1786        dd.dropStatisticsDescriptors(td.getUUID(), null, tc);
1787        long[] newIndexCongloms = new long[numIndexes];
1788
1789        indexConversionTable = new Hashtable(numIndexes);
1790        // Populate each index
1791        for (int index = 0; index < numIndexes; index++)
1792        {
1793            ConglomerateController indexCC;
1794            Properties properties = new Properties();
1795            ConglomerateDescriptor cd;
1796            // Get the ConglomerateDescriptor for the index
1797            cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
1798
1799            
1800            // Build the properties list for the new conglomerate
1801            indexCC = tc.openCompiledConglomerate(
1802                                false,
1803                                TransactionController.OPENMODE_FORUPDATE,
1804                                TransactionController.MODE_TABLE,
1805                                TransactionController.ISOLATION_SERIALIZABLE,
1806                                constants.indexSCOCIs[index],
1807                                indexDCOCIs[index]);
1808
1809            // Get the properties on the old index
1810            indexCC.getInternalTablePropertySet(properties);
1811
1812            /* Create the properties that language supplies when creating the
1813             * the index. (The store doesn't preserve these.)
1814             */

1815            int indexRowLength = indexRows[index].nColumns();
1816            properties.put("baseConglomerateId", Long.toString(newHeapConglom));
1817            if (cd.getIndexDescriptor().isUnique())
1818            {
1819                properties.put("nUniqueColumns",
1820                               Integer.toString(indexRowLength - 1));
1821            }
1822            else
1823            {
1824                properties.put("nUniqueColumns",
1825                               Integer.toString(indexRowLength));
1826            }
1827            properties.put("rowLocationColumn",
1828                            Integer.toString(indexRowLength - 1));
1829            properties.put("nKeyFields", Integer.toString(indexRowLength));
1830
1831            indexCC.close();
1832
1833            // We can finally drain the sorter and rebuild the index
1834            // RESOLVE - all indexes are btrees right now
1835            // Populate the index.
1836            sorters[index].close();
1837            sorters[index] = null;
1838            rowSources[index] = new CardinalityCounter(tc.openSortRowSource(sortIds[index]));
1839            newIndexCongloms[index] = tc.createAndLoadConglomerate(
1840                                        "BTREE",
1841                                        indexRows[index].getRowArray(),
1842                                        ordering[index],
1843                                        properties,
1844                                        TransactionController.IS_DEFAULT,
1845                                        rowSources[index],
1846                                        (long[]) null);
1847
1848            CardinalityCounter cCount = (CardinalityCounter)rowSources[index];
1849            long numRows;
1850            if ((numRows = cCount.getRowCount()) > 0)
1851            {
1852                long[] c = cCount.getCardinality();
1853                DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
1854
1855                for (int i= 0; i < c.length; i++)
1856                {
1857                    StatisticsDescriptor statDesc =
1858                        new StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(),
1859                                                    cd.getUUID(), td.getUUID(),
1860                                                    "I", new
1861                                                        StatisticsImpl(numRows,
1862                                                                       c[i]),
1863                                                    i + 1);
1864                    dd.addDescriptor(statDesc, null,
1865                                     DataDictionary.SYSSTATISTICS_CATALOG_NUM,
1866                                     true, tc);
1867                }
1868                
1869            }
1870
1871            /* Update the DataDictionary
1872             * RESOLVE - this will change in 1.4 because we will get
1873             * back the same conglomerate number
1874             *
1875             * Update sys.sysconglomerates with new conglomerate #, if the
1876             * conglomerate is shared by duplicate indexes, all the descriptors
1877             * for those indexes need to be updated with the new number.
1878             */

1879            dd.updateConglomerateDescriptor(
1880                        td.getConglomerateDescriptors(constants.indexCIDS[index]),
1881                        newIndexCongloms[index], tc);
1882
1883            // Drop the old conglomerate
1884            tc.dropConglomerate(constants.indexCIDS[index]);
1885
1886            indexConversionTable.put(new Long(constants.indexCIDS[index]),
1887                                    new Long(newIndexCongloms[index]));
1888        }
1889    }
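    // Illustrative sketch (hypothetical helper): updateAllIndexes() above and
    // emptyIndexes() below both recreate an index with the language-level properties
    // the store does not preserve. For a unique index the trailing RowLocation column
    // is excluded from the unique key; for a non-unique index it is included.
    private Properties indexRecreatePropertiesSketch(long newHeapConglom,
                                                     int indexRowLength,
                                                     boolean unique)
    {
        Properties properties = new Properties();
        properties.put("baseConglomerateId", Long.toString(newHeapConglom));
        properties.put("nUniqueColumns",
                       Integer.toString(unique ? (indexRowLength - 1) : indexRowLength));
        properties.put("rowLocationColumn", Integer.toString(indexRowLength - 1));
        properties.put("nKeyFields", Integer.toString(indexRowLength));
        return properties;
    }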
1890
1891    /**
1892     * @see ResultSet#cleanUp
1893     *
1894     * @exception StandardException Thrown on error
1895     */

1896    public void cleanUp() throws StandardException
1897    {
1898
1899        if (tableScan != null)
1900        {
1901            tableScan.close();
1902            tableScan = null;
1903        }
1904
1905        if (triggerActivator != null)
1906        {
1907            triggerActivator.cleanup();
1908            // triggerActivator is reused across executions
1909        }
1910
1911        /* Close down the source ResultSet tree */
1912        if (sourceResultSet != null)
1913        {
1914            sourceResultSet.close();
1915            // sourceResultSet is reused across executions
1916        }
1917        numOpens = 0;
1918
1919        if (rowChanger != null)
1920        {
1921            rowChanger.close();
1922        }
1923
1924        if (rowHolder != null)
1925        {
1926            rowHolder.close();
1927        }
1928
1929        if (fkChecker != null)
1930        {
1931            fkChecker.close();
1932            // fkChecker is reused across executions
1933        }
1934
1935        if (bulkHeapCC != null)
1936        {
1937            bulkHeapCC.close();
1938            bulkHeapCC = null;
1939        }
1940
1941        if (bulkHeapSC != null)
1942        {
1943            bulkHeapSC.close();
1944            bulkHeapSC = null;
1945        }
1946
1947        // Close each sorter
1948        if (sorters != null)
1949        {
1950            for (int index = 0; index < constants.irgs.length; index++)
1951            {
1952                if (sorters[index] != null)
1953                {
1954                    sorters[index].close();
1955                }
1956                sorters[index] = null;
1957            }
1958        }
1959
1960        if (needToDropSort != null)
1961        {
1962            for (int index = 0; index < needToDropSort.length; index++)
1963            {
1964                if (needToDropSort[index])
1965                {
1966                    tc.dropSort(sortIds[index]);
1967                    needToDropSort[index] = false;
1968                }
1969            }
1970        }
1971
1972        if (rowSources != null)
1973        {
1974            for (int index = 0; index < rowSources.length; index++)
1975            {
1976                if (rowSources[index] != null)
1977                {
1978                    rowSources[index].closeRowSource();
1979                    rowSources[index] = null;
1980                }
1981            }
1982        }
1983        super.close();
1984    }
1985
1986    // Class implementation
1987

1988    /**
1989     * Verify that bulkInsert is allowed on this table.
1990     * The execution time check to see if bulkInsert is allowed
1991     * simply consists of checking to see if this is not a deferred
1992     * mode insert and that the table is empty if this is not replace.
1993     *
1994     * A side effect of calling this method is to get an exclusive
1995     * table lock on the table.
1996     *
1997     * @return Whether or not bulkInsert is allowed on this table.
1998     *
1999     * @exception StandardException Thrown on error
2000     */

2001    protected boolean verifyBulkInsert()
2002        throws StandardException
2003    {
2004        // bulk insert is disabled for deferred mode inserts
2005        if (constants.deferred)
2006        {
2007            /* bulk insert replace should be disallowed for
2008             * deferred mode inserts.
2009             */

2010            if (SanityManager.DEBUG)
2011            {
2012                SanityManager.ASSERT(! bulkInsertReplace,
2013                    "bulkInsertReplace expected to be false for deferred mode inserts");
2014            }
2015            return false;
2016        }
2017
2018        return getExclusiveTableLock();
2019    }
2020
2021    /**
2022     * Get an exclusive table lock on the target table
2023     * (and check to see if the table is populated if
2024     * this is not a bulk insert replace).
2025     *
2026     * @return Whether or not bulkInsert is allowed on this table.
2027     *
2028     * @exception StandardException Thrown on error
2029     */

2030    private boolean getExclusiveTableLock()
2031        throws StandardException
2032    {
2033        boolean rowFound = false;
2034
2035        bulkHeapSC = tc.openCompiledScan(
2036                            false,
2037                            TransactionController.OPENMODE_FORUPDATE,
2038                            TransactionController.MODE_TABLE,
2039                            TransactionController.ISOLATION_SERIALIZABLE,
2040                            (FormatableBitSet) null,
2041                            (DataValueDescriptor[]) null,
2042                            0,
2043                            (Qualifier[][]) null,
2044                            (DataValueDescriptor[]) null,
2045                            0,
2046                            constants.heapSCOCI,
2047                            heapDCOCI);
2048
2049        /* No need to do next if bulk insert replace
2050         * but we do need to get a row location for the
2051         * case where the replace leaves an empty table.
2052         */

2053        if (! bulkInsertReplace)
2054        {
2055            rowFound = bulkHeapSC.next();
2056        }
2057        else
2058        {
2059            rl = bulkHeapSC.newRowLocationTemplate();
2060        }
2061
2062        bulkHeapSC.close();
2063        bulkHeapSC = null;
2064
2065        return ! rowFound;
2066    }
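    // Illustrative sketch (hypothetical helper): taken together, verifyBulkInsert()
    // and getExclusiveTableLock() above allow the bulk insert path only for
    // non-deferred inserts, and only when the target table is empty unless this is
    // a bulk insert replace.
    private boolean bulkInsertAllowedSketch(boolean deferred,
                                            boolean replace,
                                            boolean rowFound)
    {
        if (deferred)
        {
            // deferred mode inserts never use bulk insert
            return false;
        }
        // replace may overwrite existing rows; a plain bulk insert needs an empty table
        return replace || !rowFound;
    }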
2067
2068    /**
2069     * Set the estimated row count for this table.
2070     *
2071     * @param heapConglom Conglomerate number for the heap
2072     *
2073     * @exception StandardException Thrown on failure
2074     */

2075    private void setEstimatedRowCount(long heapConglom)
2076        throws StandardException
2077    {
2078        bulkHeapSC = tc.openCompiledScan(
2079                            false,
2080                            TransactionController.OPENMODE_FORUPDATE,
2081                            TransactionController.MODE_TABLE,
2082                            TransactionController.ISOLATION_SERIALIZABLE,
2083                            (FormatableBitSet) null,
2084                            (DataValueDescriptor[]) null,
2085                            0,
2086                            (Qualifier[][]) null,
2087                            (DataValueDescriptor[]) null,
2088                            0,
2089                            constants.heapSCOCI,
2090                            heapDCOCI);
2091        
2092        bulkHeapSC.setEstimatedRowCount(rowCount);
2093
2094        bulkHeapSC.close();
2095        bulkHeapSC = null;
2096    }
2097
2098    /**
2099     * Empty the indexes after doing a bulk insert replace
2100     * where the table has 0 rows after the replace.
2101     * RESOLVE: This method is ugly! Prior to 2.0, we simply
2102     * scanned back across the table to build the indexes. We
2103     * changed this in 2.0 to populate the sorters via a call back
2104     * as we populated the table. Doing a 0 row replace into a
2105     * table with indexes is a degenerate case, hence we allow
2106     * ugly and unoptimized code.
2107     *
2108     * @exception StandardException Thrown on failure
2109     */

2110    private void emptyIndexes(long newHeapConglom,
2111                              InsertConstantAction constants,
2112                              TableDescriptor td,
2113                              DataDictionary dd,
2114                              ExecRow fullTemplate)
2115        throws StandardException
2116    {
2117        int numIndexes = constants.irgs.length;
2118        ExecIndexRow[] indexRows = new ExecIndexRow[numIndexes];
2119        ExecRow baseRows = null;
2120        ColumnOrdering[][] ordering = new ColumnOrdering[numIndexes][];
2121        int numColumns = td.getNumberOfColumns();
2122
2123        // Create the BitSet for mapping the partial row to the full row
2124        FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
2125        // Need to check each index for referenced columns
2126        int numReferencedColumns = 0;
2127        for (int index = 0; index < numIndexes; index++)
2128        {
2129            int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
2130            for (int bcp = 0; bcp < baseColumnPositions.length; bcp++)
2131            {
2132                if (! bitSet.get(baseColumnPositions[bcp]))
2133                {
2134                    bitSet.set(baseColumnPositions[bcp] );
2135                    numReferencedColumns++;
2136                }
2137            }
2138        }
2139
2140        // We can finally create the partial base row
2141        baseRows = activation.getExecutionFactory().getValueRow(numReferencedColumns);
2142
2143        // Fill in each base row with nulls of the correct data type
2144        int colNumber = 0;
2145        for (int index = 0; index < numColumns; index++)
2146        {
2147            if (bitSet.get(index + 1))
2148            {
2149                colNumber++;
2150                // NOTE: 1-based column numbers
2151                baseRows.setColumn(
2152                        colNumber,
2153                        fullTemplate.getColumn(index + 1).getClone());
2154            }
2155        }
2156
2157        needToDropSort = new boolean[numIndexes];
2158        sortIds = new long[numIndexes];
2159
2160        /* Do the initial set up before scanning the heap.
2161         * For each index, build a single index row and a sorter.
2162         */

2163        for (int index = 0; index < numIndexes; index++)
2164        {
2165            // create a single index row template for each index
2166            indexRows[index] = constants.irgs[index].getIndexRowTemplate();
2167
2168            // Get an index row based on the base row
2169            // (This call is only necessary here because we need to pass a template to the sorter.)
2170            constants.irgs[index].getIndexRow(baseRows,
2171                                              rl,
2172                                              indexRows[index],
2173                                              bitSet);
2174
2175            /* For non-unique indexes, we order by all columns + the RID.
2176             * For unique indexes, we just order by the columns.
2177             * We create a unique index observer for unique indexes
2178             * so that we can catch duplicate key
2179             */

2180            ConglomerateDescriptor cd;
2181            // Get the ConglomerateDescriptor for the index
2182            cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
2183            int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
2184            boolean[] isAscending = constants.irgs[index].isAscending();
2185            int numColumnOrderings;
2186            SortObserver sortObserver = null;
2187            if (cd.getIndexDescriptor().isUnique())
2188            {
2189                numColumnOrderings = baseColumnPositions.length;
2190                String[] columnNames = getColumnNames(baseColumnPositions);
2191
2192                String indexOrConstraintName = cd.getConglomerateName();
2193                if (cd.isConstraint()) // so, the index is backing up a constraint
2194                {
2195                    ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td,
2196                                                                      cd.getUUID());
2197                    indexOrConstraintName = conDesc.getConstraintName();
2198                }
2199                sortObserver = new UniqueIndexSortObserver(
2200                                                        false, // don't clone rows
2201                                                        cd.isConstraint(),
2202                                                        indexOrConstraintName,
2203                                                        indexRows[index],
2204                                                        true,
2205                                                        td.getName());
2206            }
2207            else
2208            {
2209                numColumnOrderings = baseColumnPositions.length + 1;
2210                sortObserver = new BasicSortObserver(false, false,
2211                                                     indexRows[index],
2212                                                     true);
2213            }
2214            ordering[index] = new ColumnOrdering[numColumnOrderings];
2215            for (int ii =0; ii < isAscending.length; ii++)
2216            {
2217                ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
2218            }
2219            if (numColumnOrderings > isAscending.length)
2220                ordering[index][isAscending.length] = new IndexColumnOrder(isAscending.length);
2221
2222            // create the sorters
2223            sortIds[index] = tc.createSort(
2224                                (Properties)null,
2225                                indexRows[index].getRowArrayClone(),
2226                                ordering[index],
2227                                sortObserver,
2228                                false, // not in order
2229                                rowCount, // est rows
2230                                -1 // est row size, -1 means no idea
2231                                );
2232            needToDropSort[index] = true;
2233        }
2234
2235        // Populate sorters and get the output of each sorter into a row
2236        // source. The sorters have the indexed columns only and the columns
2237        // are in the correct order.
2238        rowSources = new RowLocationRetRowSource[numIndexes];
2239        // Fill in the RowSources
2240        SortController[] sorters = new SortController[numIndexes];
2241        for (int index = 0; index < numIndexes; index++)
2242        {
2243            sorters[index] = tc.openSort(sortIds[index]);
2244            sorters[index].close();
2245            rowSources[index] = tc.openSortRowSource(sortIds[index]);
2246        }
2247
2248        long[] newIndexCongloms = new long[numIndexes];
2249
2250        // Populate each index
2251        for (int index = 0; index < numIndexes; index++)
2252        {
2253            ConglomerateController indexCC;
2254            Properties properties = new Properties();
2255            ConglomerateDescriptor cd;
2256            // Get the ConglomerateDescriptor for the index
2257            cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
2258
2259            
2260            // Build the properties list for the new conglomerate
2261            indexCC = tc.openCompiledConglomerate(
2262                                false,
2263                                TransactionController.OPENMODE_FORUPDATE,
2264                                TransactionController.MODE_TABLE,
2265                                TransactionController.ISOLATION_SERIALIZABLE,
2266                                constants.indexSCOCIs[index],
2267                                indexDCOCIs[index]);
2268
2269            // Get the properties on the old index
2270            indexCC.getInternalTablePropertySet(properties);
2271
2272            /* Create the properties that language supplies when creating the
2273             * the index. (The store doesn't preserve these.)
2274             */

2275            int indexRowLength = indexRows[index].nColumns();
2276            properties.put("baseConglomerateId", Long.toString(newHeapConglom));
2277            if (cd.getIndexDescriptor().isUnique())
2278            {
2279                properties.put("nUniqueColumns",
2280                               Integer.toString(indexRowLength - 1));
2281            }
2282            else
2283            {
2284                properties.put("nUniqueColumns",
2285                               Integer.toString(indexRowLength));
2286            }
2287            properties.put("rowLocationColumn",
2288                            Integer.toString(indexRowLength - 1));
2289            properties.put("nKeyFields", Integer.toString(indexRowLength));
2290
2291            indexCC.close();
2292
2293            // We can finally drain the sorter and rebuild the index
2294            // RESOLVE - all indexes are btrees right now
2295            // Populate the index.
2296            newIndexCongloms[index] = tc.createAndLoadConglomerate(
2297                                        "BTREE",
2298                                        indexRows[index].getRowArray(),
2299                                        null, //default column sort order
2300                                        properties,
2301                                        TransactionController.IS_DEFAULT,
2302                                        rowSources[index],
2303                                        (long[]) null);
2304
2305            /* Update the DataDictionary
2306             *
2307             * Update sys.sysconglomerates with new conglomerate #, if the
2308             * conglomerate is shared by duplicate indexes, all the descriptors
2309             * for those indexes need to be updated with the new number.
2310             */

2311            dd.updateConglomerateDescriptor(
2312                        td.getConglomerateDescriptors(constants.indexCIDS[index]),
2313                        newIndexCongloms[index], tc);
2314
2315            // Drop the old conglomerate
2316            tc.dropConglomerate(constants.indexCIDS[index]);
2317        }
2318    }
2319
2320    /**
2321     * Get me a table scan result set, preferably a bulk
2322     * table scan, thank you. If we already have one, reopen it.
2323     */

2324    private BulkTableScanResultSet getTableScanResultSet
2325    (
2326        long conglomId
2327    ) throws StandardException
2328    {
2329        if (tableScan == null)
2330        {
2331            tableScan = new BulkTableScanResultSet(
2332                            conglomId,
2333                            tc.getStaticCompiledConglomInfo(conglomId),
2334                            activation,
2335                            new MyRowAllocator(fullTemplate), // result row allocator
2336                            0, // result set number
2337                            (GeneratedMethod)null, // start key getter
2338                            0, // start search operator
2339                            (GeneratedMethod)null, // stop key getter
2340                            0, // stop search operator
2341                            false,
2342                            (Qualifier[][])null, // qualifiers
2343                            "tableName",
2344                            (String)null,
2345                            (String)null, // index name
2346                            false, // is constraint
2347                            false, // for update
2348                            -1, // saved object for referenced bitImpl
2349                            -1,
2350                            tc.MODE_TABLE,
2351                            true, // table locked
2352                            tc.ISOLATION_READ_COMMITTED,
2353                            LanguageProperties.BULK_FETCH_DEFAULT_INT, // rows per read
2354                            false, // not a 1 row per scan
2355                            0d, // estimated rows
2356                            0d // estimated cost
2357                            );
2358            tableScan.openCore();
2359        }
2360        else
2361        {
2362            tableScan.reopenCore();
2363        }
2364        return tableScan;
2365    }
2366
2367    private String[] getColumnNames(int[] baseColumnPositions)
2368    {
2369        int length = baseColumnPositions.length;
2370        String[] columnNames = new String[length];
2371        for(int i = 0; i < length; i++)
2372        {
2373            columnNames[i] = constants.getColumnName(i);
2374        }
2375        return columnNames;
2376    }
2377
2378    public void finish() throws StandardException {
2379        sourceResultSet.finish();
2380        super.finish();
2381    }
2382
2383
2384    // inner class to be our row template constructor
2385    class MyRowAllocator implements GeneratedMethod
2386    {
2387        private ExecRow row;
2388        MyRowAllocator(ExecRow row)
2389        {
2390            this.row = row;
2391        }
2392
2393        public Object invoke(Object ref)
2394        {
2395            return row.getClone();
2396        }
2397    }
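    // Illustrative usage sketch (hypothetical): each time the bulk table scan needs a
    // result row, invoking the allocator yields a fresh clone of the full row template
    // that was passed to its constructor.
    private ExecRow allocateScanRowSketch()
    {
        return (ExecRow) new MyRowAllocator(fullTemplate).invoke(null);
    }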
2398}
2399
2400