

1 /*
2
3    Derby - Class org.apache.derby.impl.sql.compile.FromBaseTable
4
5    Licensed to the Apache Software Foundation (ASF) under one or more
6    contributor license agreements. See the NOTICE file distributed with
7    this work for additional information regarding copyright ownership.
8    The ASF licenses this file to you under the Apache License, Version 2.0
9    (the "License"); you may not use this file except in compliance with
10    the License. You may obtain a copy of the License at
11
12       http://www.apache.org/licenses/LICENSE-2.0
13
14    Unless required by applicable law or agreed to in writing, software
15    distributed under the License is distributed on an "AS IS" BASIS,
16    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17    See the License for the specific language governing permissions and
18    limitations under the License.
19
20  */

21
22 package org.apache.derby.impl.sql.compile;
23
24 import org.apache.derby.catalog.IndexDescriptor;
25 import org.apache.derby.iapi.util.StringUtil;
26
27 import org.apache.derby.iapi.reference.ClassName;
28 import org.apache.derby.iapi.reference.SQLState;
29
30 import org.apache.derby.iapi.services.io.FormatableBitSet;
31 import org.apache.derby.iapi.services.io.FormatableArrayHolder;
32 import org.apache.derby.iapi.services.io.FormatableIntHolder;
33 import org.apache.derby.iapi.util.JBitSet;
34 import org.apache.derby.iapi.util.ReuseFactory;
35 import org.apache.derby.iapi.services.classfile.VMOpcode;
36
37 import org.apache.derby.iapi.services.compiler.MethodBuilder;
38 import org.apache.derby.iapi.services.property.PropertyUtil;
39 import org.apache.derby.iapi.services.sanity.SanityManager;
40
41 import org.apache.derby.iapi.error.StandardException;
42
43 import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
44
45 import org.apache.derby.iapi.sql.compile.CompilerContext;
46 import org.apache.derby.iapi.sql.compile.OptimizablePredicateList;
47 import org.apache.derby.iapi.sql.compile.Optimizer;
48 import org.apache.derby.iapi.sql.compile.OptimizablePredicate;
49 import org.apache.derby.iapi.sql.compile.Optimizable;
50 import org.apache.derby.iapi.sql.compile.CostEstimate;
51 import org.apache.derby.iapi.sql.compile.AccessPath;
52 import org.apache.derby.iapi.sql.compile.JoinStrategy;
53 import org.apache.derby.iapi.sql.compile.RowOrdering;
54 import org.apache.derby.iapi.sql.compile.C_NodeTypes;
55
56 import org.apache.derby.iapi.sql.dictionary.DataDictionary;
57 import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;
58 import org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList;
59 import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor;
60 import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
61 import org.apache.derby.iapi.sql.dictionary.IndexRowGenerator;
62 import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;
63 import org.apache.derby.iapi.sql.dictionary.TableDescriptor;
64 import org.apache.derby.iapi.sql.dictionary.ViewDescriptor;
65
66 import org.apache.derby.iapi.sql.execute.ExecRow;
67 import org.apache.derby.iapi.sql.execute.ExecutionContext;
68
69 import org.apache.derby.iapi.sql.LanguageProperties;
70
71 import org.apache.derby.iapi.store.access.StaticCompiledOpenConglomInfo;
72 import org.apache.derby.iapi.store.access.StoreCostController;
73 import org.apache.derby.iapi.store.access.ScanController;
74 import org.apache.derby.iapi.store.access.TransactionController;
75
76 import org.apache.derby.iapi.types.DataValueDescriptor;
77
78 import org.apache.derby.impl.sql.compile.ExpressionClassBuilder;
79 import org.apache.derby.impl.sql.compile.ActivationClassBuilder;
80
81 import java.util.Enumeration;
82 import java.util.Properties;
83 import java.util.Vector;
84 import java.util.HashSet;
85 import java.util.Set;
86
87 /**
88  * A FromBaseTable represents a table in the FROM list of a DML statement,
89  * as distinguished from a FromSubquery, which represents a subquery in the
90  * FROM list. A FromBaseTable may actually represent a view. During parsing,
91  * we can't distinguish views from base tables. During binding, when we
92  * find FromBaseTables that represent views, we replace them with FromSubqueries.
93  * By the time we get to code generation, all FromSubqueries have been eliminated,
94  * and all FromBaseTables will represent only true base tables.
95  * <p>
96  * <B>Positioned Update</B>: Currently, all columns of an updatable cursor
97  * are selected to deal with a positioned update. This is because we don't
98  * know what columns will ultimately be needed from the UpdateNode above
99  * us. For example, consider:<pre><i>
100  *
101  * get c as 'select cint from t for update of ctinyint'
102  * update t set ctinyint = csmallint
103  *
104  * </i></pre> Ideally, the cursor only selects cint. Then,
105  * something akin to an IndexRowToBaseRow is generated to
106  * take the CursorResultSet and get the appropriate columns
107  * out of the base table from the RowLocation returned by the
108  * cursor. Then the update node can generate the appropriate
109  * NormalizeResultSet (or whatever else it might need) to
110  * get things into the correct format for the UpdateResultSet.
111  * See CurrentOfNode for more information.
112  *
113  * @author Jeff Lichtman
114  */

115
116 public class FromBaseTable extends FromTable
117 {
118     static final int UNSET = -1;
119
120     TableName tableName;
121     TableDescriptor tableDescriptor;
122
123     ConglomerateDescriptor baseConglomerateDescriptor;
124     ConglomerateDescriptor[] conglomDescs;
125
126     int updateOrDelete;
127     
128     /*
129     ** The number of rows to bulkFetch.
130     ** Initially it is unset. If the user
131     ** uses the bulkFetch table property,
132     ** it is set to that. Otherwise, it
133     ** may be turned on if it isn't an updatable
134     ** cursor and it is the right type of
135     ** result set (more than 1 row expected to
136     ** be returned, and not hash, which does its
137     ** own bulk fetch, and subquery).
138     */

139     int bulkFetch = UNSET;
140
141     /* We may turn off bulk fetch for a variety of reasons,
142      * including because of the min optimization.
143      * bulkFetchTurnedOff is set to true in those cases.
144      */

145     boolean bulkFetchTurnedOff;
146     
147     private double singleScanRowCount;
148
149     private FormatableBitSet referencedCols;
150     private ResultColumnList templateColumns;
151
152     /* A 0-based array of column names for this table used
153      * for optimizer trace.
154      */

155     private String[] columnNames;
156
157     // true if we are to do a special scan to retrieve the last value
158     // in the index
159     private boolean specialMaxScan;
160
161     // true if we are to do a distinct scan
162     private boolean distinctScan;
163
164     /**
165      *Information for dependent table scan for Referential Actions
166      */

167     private boolean raDependentScan;
168     private String raParentResultSetId;
169     private long fkIndexConglomId;
170     private int[] fkColArray;
171
172     /**
173      * Restriction as a PredicateList
174      */

175     PredicateList baseTableRestrictionList;
176     PredicateList nonBaseTableRestrictionList;
177     PredicateList restrictionList;
178     PredicateList storeRestrictionList;
179     PredicateList nonStoreRestrictionList;
180     PredicateList requalificationRestrictionList;
181
182     public static final int UPDATE = 1;
183     public static final int DELETE = 2;
184
185     /* Variables for EXISTS FBTs */
186     private boolean existsBaseTable;
187     private boolean isNotExists; //is a NOT EXISTS base table
188     private JBitSet dependencyMap;
189
190     private boolean getUpdateLocks;
191
192     /**
193      * Initializer for a table in a FROM list. Parameters are as follows:
194      *
195      * <ul>
196      * <li>tableName The name of the table</li>
197      * <li>correlationName The correlation name</li>
198      * <li>derivedRCL The derived column list</li>
199      * <li>tableProperties The Properties list associated with the table.</li>
200      * </ul>
201      *
202      * <p>
203      * - OR -
204      * </p>
205      *
206      * <ul>
207      * <li>tableName The name of the table</li>
208      * <li>correlationName The correlation name</li>
209      * <li>updateOrDelete Table is being updated/deleted from. </li>
210      * <li>derivedRCL The derived column list</li>
211      * </ul>
212      */

213     public void init(
214                             Object arg1,
215                             Object arg2,
216                             Object arg3,
217                             Object arg4)
218     {
219         if (arg3 instanceof Integer)
220         {
221             init(arg2, null);
222             this.tableName = (TableName) arg1;
223             this.updateOrDelete = ((Integer) arg3).intValue();
224             resultColumns = (ResultColumnList) arg4;
225         }
226         else
227         {
228             init(arg2, arg4);
229             this.tableName = (TableName) arg1;
230             resultColumns = (ResultColumnList) arg3;
231         }
232
233         setOrigTableName(this.tableName);
234         templateColumns = resultColumns;
235     }
236
237     /**
238      * no LOJ reordering for base table.
239      */

240     public boolean LOJ_reorderable(int numTables)
241                 throws StandardException
242     {
243         return false;
244     }
245
246     public JBitSet LOJgetReferencedTables(int numTables)
247                 throws StandardException
248     {
249         JBitSet map = new JBitSet(numTables);
250         fillInReferencedTableMap(map);
251         return map;
252     }
253
254     /*
255      * Optimizable interface.
256      */

257
258     /**
259      * @see Optimizable#nextAccessPath
260      *
261      * @exception StandardException Thrown on error
262      */

263     public boolean nextAccessPath(Optimizer optimizer,
264                                     OptimizablePredicateList predList,
265                                     RowOrdering rowOrdering)
266                     throws StandardException
267     {
268         String userSpecifiedIndexName = getUserSpecifiedIndexName();
269         AccessPath ap = getCurrentAccessPath();
270         ConglomerateDescriptor currentConglomerateDescriptor =
271                                                 ap.getConglomerateDescriptor();
272
273         optimizer.trace(Optimizer.CALLING_NEXT_ACCESS_PATH,
274                        ((predList == null) ? 0 : predList.size()),
275                        0, 0.0, getExposedName());
276
277         /*
278         ** Remove the ordering of the current conglomerate descriptor,
279         ** if any.
280         */

281         rowOrdering.removeOptimizable(getTableNumber());
282
283         // RESOLVE: This will have to be modified to step through the
284         // join strategies as well as the conglomerates.
285

286         if (userSpecifiedIndexName != null)
287         {
288             /*
289             ** User specified an index name, so we should look at only one
290             ** index. If there is a current conglomerate descriptor, and there
291             ** are no more join strategies, we've already looked at the index,
292             ** so go back to null.
293             */

294             if (currentConglomerateDescriptor != null)
295             {
296                 if ( ! super.nextAccessPath(optimizer,
297                                             predList,
298                                             rowOrdering) )
299                 {
300                     currentConglomerateDescriptor = null;
301                 }
302             }
303             else
304             {
305                 optimizer.trace(Optimizer.LOOKING_FOR_SPECIFIED_INDEX,
306                                 tableNumber, 0, 0.0, userSpecifiedIndexName);
307
308                 if (StringUtil.SQLToUpperCase(userSpecifiedIndexName).equals("NULL"))
309                 {
310                     /* Special case - user-specified table scan */
311                     currentConglomerateDescriptor =
312                         tableDescriptor.getConglomerateDescriptor(
313                                         tableDescriptor.getHeapConglomerateId()
314                                     );
315                 }
316                 else
317                 {
318                     /* User-specified index name */
319                     getConglomDescs();
320                 
321                     for (int index = 0; index < conglomDescs.length; index++)
322                     {
323                         currentConglomerateDescriptor = conglomDescs[index];
324                         String conglomerateName =
325                             currentConglomerateDescriptor.getConglomerateName();
326                         if (conglomerateName != null)
327                         {
328                             /* Have we found the desired index? */
329                             if (conglomerateName.equals(userSpecifiedIndexName))
330                             {
331                                 break;
332                             }
333                         }
334                     }
335
336                     /* We should always find a match */
337                     if (SanityManager.DEBUG)
338                     {
339                         if (currentConglomerateDescriptor == null)
340                         {
341                             SanityManager.THROWASSERT(
342                                 "Expected to find match for forced index " +
343                                 userSpecifiedIndexName);
344                         }
345                     }
346                 }
347
348                 if ( ! super.nextAccessPath(optimizer,
349                                             predList,
350                                             rowOrdering))
351                 {
352                     if (SanityManager.DEBUG)
353                     {
354                         SanityManager.THROWASSERT("No join strategy found");
355                     }
356                 }
357             }
358         }
359         else
360         {
361             if (currentConglomerateDescriptor != null)
362             {
363                 /*
364                 ** Once we have a conglomerate descriptor, cycle through
365                 ** the join strategies (done in parent).
366                 */

367                 if ( ! super.nextAccessPath(optimizer,
368                                             predList,
369                                             rowOrdering))
370                 {
371                     /*
372                     ** When we're out of join strategies, go to the next
373                     ** conglomerate descriptor.
374                     */

375                     currentConglomerateDescriptor = getNextConglom(currentConglomerateDescriptor);
376
377                     /*
378                     ** New conglomerate, so step through join strategies
379                     ** again.
380                     */

381                     resetJoinStrategies(optimizer);
382
383                     if ( ! super.nextAccessPath(optimizer,
384                                                 predList,
385                                                 rowOrdering))
386                     {
387                         if (SanityManager.DEBUG)
388                         {
389                             SanityManager.THROWASSERT("No join strategy found");
390                         }
391                     }
392                 }
393             }
394             else
395             {
396                 /* Get the first conglomerate descriptor */
397                 currentConglomerateDescriptor = getFirstConglom();
398
399                 if ( ! super.nextAccessPath(optimizer,
400                                             predList,
401                                             rowOrdering))
402                 {
403                     if (SanityManager.DEBUG)
404                     {
405                         SanityManager.THROWASSERT("No join strategy found");
406                     }
407                 }
408             }
409         }
410
411         if (currentConglomerateDescriptor == null)
412         {
413             optimizer.trace(Optimizer.NO_MORE_CONGLOMERATES, tableNumber, 0, 0.0, null);
414         }
415         else
416         {
417             currentConglomerateDescriptor.setColumnNames(columnNames);
418             optimizer.trace(Optimizer.CONSIDERING_CONGLOMERATE, tableNumber, 0, 0.0,
419                             currentConglomerateDescriptor);
420         }
421
422         /*
423         ** Tell the rowOrdering what the ordering of this conglomerate is.
424         */

425         if (currentConglomerateDescriptor != null)
426         {
427             if ( ! currentConglomerateDescriptor.isIndex())
428             {
429                 /* If we are scanning the heap, but there
430                  * is a full match on a unique key, then
431                  * we can say that the table IS NOT unordered.
432                  * (We can't currently say what the ordering is
433                  * though.)
434                  */

435                 if (! isOneRowResultSet(predList))
436                 {
437                     optimizer.trace(Optimizer.ADDING_UNORDERED_OPTIMIZABLE,
438                                      ((predList == null) ? 0 : predList.size()),
439                                      0, 0.0, null);
440
441                     rowOrdering.addUnorderedOptimizable(this);
442                 }
443                 else
444                 {
445                     optimizer.trace(Optimizer.SCANNING_HEAP_FULL_MATCH_ON_UNIQUE_KEY,
446                                      0, 0, 0.0, null);
447                 }
448             }
449             else
450             {
451                 IndexRowGenerator irg =
452                             currentConglomerateDescriptor.getIndexDescriptor();
453
454                 int[] baseColumnPositions = irg.baseColumnPositions();
455                 boolean[] isAscending = irg.isAscending();
456
457                 for (int i = 0; i < baseColumnPositions.length; i++)
458                 {
459                     /*
460                     ** Don't add the column to the ordering if it's already
461                     ** an ordered column. This can happen in the following
462                     ** case:
463                     **
464                     ** create index ti on t(x, y);
465                     ** select * from t where x = 1 order by y;
466                     **
467                     ** Column x is always ordered, so we want to avoid the
468                     ** sort when using index ti. This is accomplished by
469                     ** making column y appear as the first ordered column
470                     ** in the list.
471                     */

472                     if ( ! rowOrdering.orderedOnColumn(isAscending[i] ?
473                                                     RowOrdering.ASCENDING :
474                                                     RowOrdering.DESCENDING,
475                                                     getTableNumber(),
476                                                     baseColumnPositions[i]))
477                     {
478                         rowOrdering.nextOrderPosition(isAscending[i] ?
479                                                     RowOrdering.ASCENDING :
480                                                     RowOrdering.DESCENDING);
481
482                         rowOrdering.addOrderedColumn(isAscending[i] ?
483                                                     RowOrdering.ASCENDING :
484                                                     RowOrdering.DESCENDING,
485                                                     getTableNumber(),
486                                                     baseColumnPositions[i]);
487                     }
488                 }
489             }
490         }
491
492         ap.setConglomerateDescriptor(currentConglomerateDescriptor);
493
494         return currentConglomerateDescriptor != null;
495     }
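The sort-avoidance case sketched in the comment above (index ti on (x, y), an equality predicate on x, ORDER BY y) can be observed from a client program. Below is a minimal JDBC sketch, assuming an embedded database URL and hypothetical table/index names; Derby's RUNTIMESTATISTICS output is used only to inspect the plan that was chosen:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SortAvoidanceExample
{
    public static void main(String[] args) throws Exception
    {
        try (Connection conn =
                 DriverManager.getConnection("jdbc:derby:demoDB;create=true");
             Statement stmt = conn.createStatement())
        {
            stmt.execute("CREATE TABLE t (x INT, y INT)");
            stmt.execute("CREATE INDEX ti ON t (x, y)");

            // Collect runtime statistics for the statements that follow.
            stmt.execute("CALL SYSCS_UTIL.SYSCS_SET_RUNTIMESTATISTICS(1)");

            // x is bound to a single value, so index ti already returns rows
            // ordered on y; the ORDER BY should not require a sort node.
            ResultSet rs = stmt.executeQuery("SELECT * FROM t WHERE x = 1 ORDER BY y");
            while (rs.next()) { /* drain the result set */ }
            rs.close();

            ResultSet stats = stmt.executeQuery("VALUES SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS()");
            if (stats.next())
            {
                System.out.println(stats.getString(1)); // textual plan description
            }
            stats.close();
        }
    }
}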
496
497     /** Tell super-class that this Optimizable can be ordered */
498     protected boolean canBeOrdered()
499     {
500         return true;
501     }
502
503     /**
504      * @see org.apache.derby.iapi.sql.compile.Optimizable#optimizeIt
505      *
506      * @exception StandardException Thrown on error
507      */

508     public CostEstimate optimizeIt(
509                 Optimizer optimizer,
510                 OptimizablePredicateList predList,
511                 CostEstimate outerCost,
512                 RowOrdering rowOrdering)
513             throws StandardException
514     {
515         optimizer.costOptimizable(
516                             this,
517                             tableDescriptor,
518                             getCurrentAccessPath().getConglomerateDescriptor(),
519                             predList,
520                             outerCost);
521
522         // The cost that we found from the above call is now stored in the
523         // cost field of this FBT's current access path. So that's the
524         // cost we want to return here.
525         return getCurrentAccessPath().getCostEstimate();
526     }
527
528     /** @see Optimizable#getTableDescriptor */
529     public TableDescriptor getTableDescriptor()
530     {
531         return tableDescriptor;
532     }
533
534
535     /** @see Optimizable#isMaterializable
536      *
537      * @exception StandardException Thrown on error
538      */

539     public boolean isMaterializable()
540         throws StandardException
541     {
542         /* base tables are always materializable */
543         return true;
544     }
545
546
547     /**
548      * @see Optimizable#pushOptPredicate
549      *
550      * @exception StandardException Thrown on error
551      */

552
553     public boolean pushOptPredicate(OptimizablePredicate optimizablePredicate)
554         throws StandardException
555     {
556         if (SanityManager.DEBUG)
557         {
558             SanityManager.ASSERT(optimizablePredicate instanceof Predicate,
559                 "optimizablePredicate expected to be instanceof Predicate");
560         }
561
562         /* Add the matching predicate to the restrictionList */
563         restrictionList.addPredicate((Predicate) optimizablePredicate);
564
565         return true;
566     }
567
568     /**
569      * @see Optimizable#pullOptPredicates
570      *
571      * @exception StandardException Thrown on error
572      */

573     public void pullOptPredicates(
574                                 OptimizablePredicateList optimizablePredicates)
575                     throws StandardException
576     {
577         for (int i = restrictionList.size() - 1; i >= 0; i--) {
578             optimizablePredicates.addOptPredicate(
579                                     restrictionList.getOptPredicate(i));
580             restrictionList.removeOptPredicate(i);
581         }
582     }
583
584     /**
585      * @see Optimizable#isCoveringIndex
586      * @exception StandardException Thrown on error
587      */

588     public boolean isCoveringIndex(ConglomerateDescriptor cd) throws StandardException
589     {
590         boolean coveringIndex = true;
591         IndexRowGenerator irg;
592         int[] baseCols;
593         int colPos;
594
595         /* You can only be a covering index if you're an index */
596         if ( ! cd.isIndex())
597             return false;
598
599         irg = cd.getIndexDescriptor();
600         baseCols = irg.baseColumnPositions();
601
602         /* First we check to see if this is a covering index */
603         int rclSize = resultColumns.size();
604         for (int index = 0; index < rclSize; index++)
605         {
606             ResultColumn rc = (ResultColumn) resultColumns.elementAt(index);
607
608             /* Ignore unreferenced columns */
609             if (! rc.isReferenced())
610             {
611                 continue;
612             }
613
614             /* Ignore constants - this can happen if all of the columns
615              * were projected out and we ended up just generating
616              * a "1" in RCL.doProject().
617              */

618             if (rc.getExpression() instanceof ConstantNode)
619             {
620                 continue;
621             }
622
623             coveringIndex = false;
624
625             colPos = rc.getColumnPosition();
626
627             /* Is this column in the index? */
628             for (int i = 0; i < baseCols.length; i++)
629             {
630                 if (colPos == baseCols[i])
631                 {
632                     coveringIndex = true;
633                     break;
634                 }
635             }
636
637             /* No need to continue if the column was not in the index */
638             if (! coveringIndex)
639             {
640                 break;
641             }
642         }
643         return coveringIndex;
644     }
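In plain terms, the loop above asks whether every referenced, non-constant result column position appears among the index's base column positions. Here is a stand-alone restatement of that test, as a hypothetical helper rather than Derby's own API:

final class CoveringIndexCheck
{
    // Hypothetical simplification of the covering-index test: an index
    // covers the query when every referenced column position is among
    // the index's base column positions.
    static boolean covers(int[] indexBaseColumnPositions, int[] referencedColumnPositions)
    {
        for (int i = 0; i < referencedColumnPositions.length; i++)
        {
            boolean inIndex = false;
            for (int j = 0; j < indexBaseColumnPositions.length; j++)
            {
                if (indexBaseColumnPositions[j] == referencedColumnPositions[i])
                {
                    inIndex = true;
                    break;
                }
            }
            if (!inIndex)
            {
                return false;   // a referenced column is missing from the index
            }
        }
        return true;
    }
}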
645
646     /** @see Optimizable#verifyProperties
647      * @exception StandardException Thrown on error
648      */

649     public void verifyProperties(DataDictionary dDictionary)
650         throws StandardException
651     {
652         if (tableProperties == null)
653         {
654             return;
655         }
656         /* Check here for:
657          * invalid properties key
658          * index and constraint properties
659          * non-existent index
660          * non-existent constraint
661          * invalid joinStrategy
662          * invalid value for hashInitialCapacity
663          * invalid value for hashLoadFactor
664          * invalid value for hashMaxCapacity
665          */

666         boolean indexSpecified = false;
667         boolean constraintSpecified = false;
668         ConstraintDescriptor consDesc = null;
669         Enumeration e = tableProperties.keys();
670
671             StringUtil.SQLEqualsIgnoreCase(tableDescriptor.getSchemaName(),
672                                            "SYS");
673         while (e.hasMoreElements())
674         {
675             String key = (String) e.nextElement();
676             String value = (String) tableProperties.get(key);
677
678             if (key.equals("index"))
679             {
680                 // User only allowed to specify 1 of index and constraint, not both
681                 if (constraintSpecified)
682                 {
683                     throw StandardException.newException(SQLState.LANG_BOTH_FORCE_INDEX_AND_CONSTRAINT_SPECIFIED,
684                                 getBaseTableName());
685                 }
686                 indexSpecified = true;
687
688                 /* Validate index name - NULL means table scan */
689                 if (! StringUtil.SQLToUpperCase(value).equals("NULL"))
690                 {
691                     ConglomerateDescriptor cd = null;
692                     ConglomerateDescriptor[] cds = tableDescriptor.getConglomerateDescriptors();
693
694                     for (int index = 0; index < cds.length; index++)
695                     {
696                         cd = cds[index];
697                         String conglomerateName = cd.getConglomerateName();
698                         if (conglomerateName != null)
699                         {
700                             if (conglomerateName.equals(value))
701                             {
702                                 break;
703                             }
704                         }
705                         // Not a match, clear cd
706                         cd = null;
707                     }
708
709                     // Throw exception if user specified index not found
710                     if (cd == null)
711                     {
712                         throw StandardException.newException(SQLState.LANG_INVALID_FORCED_INDEX1,
713                                         value, getBaseTableName());
714                     }
715                     /* Query is dependent on the ConglomerateDescriptor */
716                     getCompilerContext().createDependency(cd);
717                 }
718             }
719             else if (key.equals("constraint"))
720             {
721                 // User only allowed to specify 1 of index and constraint, not both
722                 if (indexSpecified)
723                 {
724                     throw StandardException.newException(SQLState.LANG_BOTH_FORCE_INDEX_AND_CONSTRAINT_SPECIFIED,
725                                 getBaseTableName());
726                 }
727                 constraintSpecified = true;
728
729                 if (! StringUtil.SQLToUpperCase(value).equals("NULL"))
730                 {
731                     consDesc =
732                         dDictionary.getConstraintDescriptorByName(
733                                     tableDescriptor, (SchemaDescriptor)null, value,
734                                     false);
735
736                     /* Throw exception if user specified constraint not found
737                      * or if it does not have a backing index.
738                      */

739                     if ((consDesc == null) || ! consDesc.hasBackingIndex())
740                     {
741                         throw StandardException.newException(SQLState.LANG_INVALID_FORCED_INDEX2,
742                                         value, getBaseTableName());
743                     }
744
745                     /* Query is dependent on the ConstraintDescriptor */
746                     getCompilerContext().createDependency(consDesc);
747                 }
748             }
749             else if (key.equals("joinStrategy"))
750             {
751                 userSpecifiedJoinStrategy = StringUtil.SQLToUpperCase(value);
752             }
753             else if (key.equals("hashInitialCapacity"))
754             {
755                 initialCapacity = getIntProperty(value, key);
756
757                 // verify that the specified value is valid
758                 if (initialCapacity <= 0)
759                 {
760                     throw StandardException.newException(SQLState.LANG_INVALID_HASH_INITIAL_CAPACITY,
761                             String.valueOf(initialCapacity));
762                 }
763             }
764             else if (key.equals("hashLoadFactor"))
765             {
766                 try
767                 {
768                     loadFactor = Float.valueOf(value).floatValue();
769                 }
770                 catch (NumberFormatException nfe)
771                 {
772                     throw StandardException.newException(SQLState.LANG_INVALID_NUMBER_FORMAT_FOR_OVERRIDE,
773                             value, key);
774                 }
775
776                 // verify that the specified value is valid
777                 if (loadFactor <= 0.0 || loadFactor > 1.0)
778                 {
779                     throw StandardException.newException(SQLState.LANG_INVALID_HASH_LOAD_FACTOR,
780                             value);
781                 }
782             }
783             else if (key.equals("hashMaxCapacity"))
784             {
785                 maxCapacity = getIntProperty(value, key);
786
787                 // verify that the specified value is valid
788                 if (maxCapacity <= 0)
789                 {
790                     throw StandardException.newException(SQLState.LANG_INVALID_HASH_MAX_CAPACITY,
791                             String.valueOf(maxCapacity));
792                 }
793             }
794             else if (key.equals("bulkFetch"))
795             {
796                 bulkFetch = getIntProperty(value, key);
797
798                 // verify that the specified value is valid
799                 if (bulkFetch <= 0)
800                 {
801                     throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_VALUE,
802                             String.valueOf(bulkFetch));
803                 }
804             
805                 // no bulk fetch on updatable scans
806                 if (forUpdate())
807                 {
808                     throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_UPDATEABLE);
809                 }
810             }
811             else
812             {
813                 // No other "legal" values at this time
814                 throw StandardException.newException(SQLState.LANG_INVALID_FROM_TABLE_PROPERTY, key,
815                     "index, constraint, joinStrategy");
816             }
817         }
818
819         /* If user specified a non-null constraint name(DERBY-1707), then
820          * replace it in the properties list with the underlying index name to
821          * simplify the code in the optimizer.
822          * NOTE: The code to get from the constraint name, for a constraint
823          * with a backing index, to the index name is convoluted. Given
824          * the constraint name, we can get the conglomerate id from the
825          * ConstraintDescriptor. We then use the conglomerate id to get
826          * the ConglomerateDescriptor from the DataDictionary and, finally,
827          * we get the index name (conglomerate name) from the ConglomerateDescriptor.
828          */

829         if (constraintSpecified && consDesc != null)
830         {
831             ConglomerateDescriptor cd =
832                 dDictionary.getConglomerateDescriptor(
833                     consDesc.getConglomerateId());
834             String indexName = cd.getConglomerateName();
835
836             tableProperties.remove("constraint");
837             tableProperties.put("index", indexName);
838         }
839     }
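The properties validated above (index, constraint, joinStrategy, bulkFetch, and the hash overrides) are normally supplied through Derby's optimizer-override comment on the table reference. Below is a minimal JDBC sketch, assuming an embedded database and hypothetical names T and T_IDX; note that --DERBY-PROPERTIES consumes the rest of the line, so the statement continues after an embedded newline:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ForcedIndexExample
{
    public static void main(String[] args) throws Exception
    {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:demoDB");
             Statement stmt = conn.createStatement())
        {
            // Force the access path to the (hypothetical) index T_IDX;
            // verifyProperties() rejects unknown keys, non-existent indexes,
            // and combinations such as index plus constraint.
            ResultSet rs = stmt.executeQuery(
                "SELECT * FROM t --DERBY-PROPERTIES index=T_IDX\n" +
                "WHERE c1 = 1");
            while (rs.next())
            {
                System.out.println(rs.getInt(1));
            }
            rs.close();
        }
    }
}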
840
841     /** @see Optimizable#getBaseTableName */
842     public String getBaseTableName()
843     {
844         return tableName.getTableName();
845     }
846
847     /** @see Optimizable#startOptimizing */
848     public void startOptimizing(Optimizer optimizer, RowOrdering rowOrdering)
849     {
850         AccessPath ap = getCurrentAccessPath();
851         AccessPath bestAp = getBestAccessPath();
852         AccessPath bestSortAp = getBestSortAvoidancePath();
853
854         ap.setConglomerateDescriptor((ConglomerateDescriptor) null);
855         bestAp.setConglomerateDescriptor((ConglomerateDescriptor) null);
856         bestSortAp.setConglomerateDescriptor((ConglomerateDescriptor) null);
857         ap.setCoveringIndexScan(false);
858         bestAp.setCoveringIndexScan(false);
859         bestSortAp.setCoveringIndexScan(false);
860         ap.setLockMode(0);
861         bestAp.setLockMode(0);
862         bestSortAp.setLockMode(0);
863
864         /*
865         ** Only need to do this for current access path, because the
866         ** costEstimate will be copied to the best access paths as
867         ** necessary.
868         */

869         CostEstimate costEstimate = getCostEstimate(optimizer);
870         ap.setCostEstimate(costEstimate);
871
872         /*
873         ** This is the initial cost of this optimizable. Initialize it
874         ** to the maximum cost so that the optimizer will think that
875         ** any access path is better than none.
876         */

877         costEstimate.setCost(Double.MAX_VALUE, Double.MAX_VALUE, Double.MAX_VALUE);
878
879         super.startOptimizing(optimizer, rowOrdering);
880     }
881
882     /** @see Optimizable#convertAbsoluteToRelativeColumnPosition */
883     public int convertAbsoluteToRelativeColumnPosition(int absolutePosition)
884     {
885         return mapAbsoluteToRelativeColumnPosition(absolutePosition);
886     }
887
888     /**
889      * @see Optimizable#estimateCost
890      *
891      * @exception StandardException Thrown on error
892      */

893     public CostEstimate estimateCost(OptimizablePredicateList predList,
894                                     ConglomerateDescriptor cd,
895                                     CostEstimate outerCost,
896                                     Optimizer optimizer,
897                                     RowOrdering rowOrdering)
898             throws StandardException
899     {
900         double cost;
901         boolean statisticsForTable = false;
902         boolean statisticsForConglomerate = false;
903         /* unknownPredicateList contains all predicates whose effect on
904          * cost/selectivity can't be calculated by the store.
905          */

906         PredicateList unknownPredicateList = null;
907
908         if (optimizer.useStatistics() && predList != null)
909         {
910             /* if user has specified that we don't use statistics,
911                pretend that statistics don't exist.
912             */

913             statisticsForConglomerate = tableDescriptor.statisticsExist(cd);
914             statisticsForTable = tableDescriptor.statisticsExist(null);
915             unknownPredicateList = new PredicateList();
916             predList.copyPredicatesToOtherList(unknownPredicateList);
917
918         }
919
920         AccessPath currentAccessPath = getCurrentAccessPath();
921         JoinStrategy currentJoinStrategy =
922             currentAccessPath.getJoinStrategy();
923
924         optimizer.trace(Optimizer.ESTIMATING_COST_OF_CONGLOMERATE,
925                         tableNumber, 0, 0.0, cd);
926
927         /* Get the uniqueness factory for later use (see below) */
928         double tableUniquenessFactor =
929                 optimizer.uniqueJoinWithOuterTable(predList);
930
931         boolean oneRowResultSetForSomeConglom = isOneRowResultSet(predList);
932
933         /* Get the predicates that can be used for scanning the base table */
934         baseTableRestrictionList.removeAllElements();
935
936         currentJoinStrategy.getBasePredicates(predList,
937                                        baseTableRestrictionList,
938                                        this);
939                                     
940         /* RESOLVE: Need to figure out how to cache the StoreCostController */
941         StoreCostController scc = getStoreCostController(cd);
942
943         CostEstimate costEstimate = getScratchCostEstimate(optimizer);
944
945         /* First, get the cost for one scan */
946
947         /* Does the conglomerate match at most one row? */
948         if (isOneRowResultSet(cd, baseTableRestrictionList))
949         {
950             /*
951             ** Tell the RowOrdering that this optimizable is always ordered.
952             ** It will figure out whether it is really always ordered in the
953             ** context of the outer tables and their orderings.
954             */

955             rowOrdering.optimizableAlwaysOrdered(this);
956
957             singleScanRowCount = 1.0;
958
959             /* Yes, the cost is to fetch exactly one row */
960             // RESOLVE: NEED TO FIGURE OUT HOW TO GET REFERENCED COLUMN LIST,
961             // FIELD STATES, AND ACCESS TYPE
962             cost = scc.getFetchFromFullKeyCost(
963                                         (FormatableBitSet) null,
964                                         0);
965
966             optimizer.trace(Optimizer.MATCH_SINGLE_ROW_COST,
967                             tableNumber, 0, cost, null);
968
969             costEstimate.setCost(cost, 1.0d, 1.0d);
970
971             /*
972             ** Let the join strategy decide whether the cost of the base
973             ** scan is a single scan, or a scan per outer row.
974             ** NOTE: The multiplication should only be done against the
975             ** total row count, not the singleScanRowCount.
976             */

977             double newCost = costEstimate.getEstimatedCost();
978
979             if (currentJoinStrategy.multiplyBaseCostByOuterRows())
980             {
981                 newCost *= outerCost.rowCount();
982             }
983
984             costEstimate.setCost(
985                 newCost,
986                 costEstimate.rowCount() * outerCost.rowCount(),
987                 costEstimate.singleScanRowCount());
988
989             /*
990             ** Choose the lock mode. If the start/stop conditions are
991             ** constant, choose row locking, because we will always match
992             ** the same row. If they are not constant (i.e. they include
993             ** a join), we decide whether to do row locking based on
994             ** the total number of rows for the life of the query.
995             */

996             boolean constantStartStop = true;
997             for (int i = 0; i < predList.size(); i++)
998             {
999                 OptimizablePredicate pred = predList.getOptPredicate(i);
1000
1001                /*
1002                ** The predicates are in index order, so the start and
1003                ** stop keys should be first.
1004                */

1005                if ( ! (pred.isStartKey() || pred.isStopKey()))
1006                {
1007                    break;
1008                }
1009
1010                /* Stop when we've found a join */
1011                if ( ! pred.getReferencedMap().hasSingleBitSet())
1012                {
1013                    constantStartStop = false;
1014                    break;
1015                }
1016            }
1017
1018            if (constantStartStop)
1019            {
1020                currentAccessPath.setLockMode(
1021                                            TransactionController.MODE_RECORD);
1022
1023                optimizer.trace(Optimizer.ROW_LOCK_ALL_CONSTANT_START_STOP,
1024                                0, 0, 0.0, null);
1025            }
1026            else
1027            {
1028                setLockingBasedOnThreshold(optimizer, costEstimate.rowCount());
1029            }
1030
1031            optimizer.trace(Optimizer.COST_OF_N_SCANS,
1032                            tableNumber, 0, outerCost.rowCount(), costEstimate);
1033
1034            /* Add in cost of fetching base row for non-covering index */
1035            if (cd.isIndex() && ( ! isCoveringIndex(cd) ) )
1036            {
1037                double singleFetchCost =
1038                        getBaseCostController().getFetchFromRowLocationCost(
1039                                                                (FormatableBitSet) null,
1040                                                                0);
1041                cost = singleFetchCost * costEstimate.rowCount();
1042
1043                costEstimate.setEstimatedCost(
1044                                costEstimate.getEstimatedCost() + cost);
1045
1046                optimizer.trace(Optimizer.NON_COVERING_INDEX_COST,
1047                                tableNumber, 0, cost, null);
1048            }
1049        }
1050        else
1051        {
1052            /* Conglomerate might match more than one row */
1053
1054            /*
1055            ** Some predicates are good for start/stop, but we don't know
1056            ** the values they are being compared to at this time, so we
1057            ** estimate their selectivity in language rather than ask the
1058            ** store about them. The predicates on the first column of
1059            ** the conglomerate reduce the number of pages and rows scanned.
1060            ** The predicates on columns after the first reduce the number
1061            ** of rows scanned, but have a much smaller effect on the number
1062            ** of pages scanned, so we keep track of these selectivities in
1063            ** two separate variables: extraFirstColumnSelectivity and
1064            ** extraStartStopSelectivity. (Theoretically, we could try to
1065            ** figure out the effect of predicates after the first column
1066            ** on the number of pages scanned, but it's too hard, so we
1067            ** use these predicates only to reduce the estimated number of
1068            ** rows. For comparisons with known values, though, the store
1069            ** can figure out exactly how many rows and pages are scanned.)
1070            **
1071            ** Other predicates are not good for start/stop. We keep track
1072            ** of their selectivities separately, because these limit the
1073            ** number of rows, but not the number of pages, and so need to
1074            ** be factored into the row count but not into the cost.
1075            ** These selectivities are factored into extraQualifierSelectivity.
1076            **
1077            ** statStartStopSelectivity (using statistics) represents the
1078            ** selectivity of start/stop predicates that can be used to scan
1079            ** the index. If no statistics exist for the conglomerate then
1080            ** the value of this variable remains at 1.0
1081            **
1082            ** statCompositeSelectivity (using statistics) represents the
1083            ** selectivity of all the predicates (including NonBaseTable
1084            ** predicates). This represents the most educated guess [among
1085            ** all the wild surmises in this routine] as to the number
1086            ** of rows that will be returned from this joinNode.
1087            ** If no statistics exist on the table or no statistics at all
1088            ** can be found to satisfy the predicates at this join operator,
1089            ** then statCompositeSelectivity is left initialized at 1.0
1090            */

1091            double extraFirstColumnSelectivity = 1.0d;
1092            double extraStartStopSelectivity = 1.0d;
1093            double extraQualifierSelectivity = 1.0d;
1094            double extraNonQualifierSelectivity = 1.0d;
1095            double statStartStopSelectivity = 1.0d;
1096            double statCompositeSelectivity = 1.0d;
1097
1098            int numExtraFirstColumnPreds = 0;
1099            int numExtraStartStopPreds = 0;
1100            int numExtraQualifiers = 0;
1101            int numExtraNonQualifiers = 0;
1102
1103            /*
1104            ** It is possible for something to be a start or stop predicate
1105            ** without it being possible to use it as a key for cost estimation.
1106            ** For example, with an index on (c1, c2), and the predicate
1107            ** c1 = othertable.c3 and c2 = 1, the comparison on c1 is with
1108            ** an unknown value, so we can't pass it to the store. This means
1109            ** we can't pass the comparison on c2 to the store, either.
1110            **
1111            ** The following booleans keep track of whether we have seen
1112            ** gaps in the keys we can pass to the store.
1113            */

1114            boolean startGap = false;
1115            boolean stopGap = false;
1116            boolean seenFirstColumn = false;
1117
1118            /*
1119            ** We need to figure out the number of rows touched to decide
1120            ** whether to use row locking or table locking. If the start/stop
1121            ** conditions are constant (i.e. no joins), the number of rows
1122            ** touched is the number of rows per scan. But if the start/stop
1123            ** conditions contain a join, the number of rows touched must
1124            ** take the number of outer rows into account.
1125            */

1126            boolean constantStartStop = true;
1127            boolean startStopFound = false;
1128
1129            /* Count the number of start and stop keys */
1130            int startKeyNum = 0;
1131            int stopKeyNum = 0;
1132            OptimizablePredicate pred;
1133            int predListSize;
1134
1135            if (predList != null)
1136                predListSize = baseTableRestrictionList.size();
1137            else
1138                predListSize = 0;
1139
1140            int startStopPredCount = 0;
1141            ColumnReference firstColumn = null;
1142            for (int i = 0; i < predListSize; i++)
1143            {
1144                pred = baseTableRestrictionList.getOptPredicate(i);
1145                boolean startKey = pred.isStartKey();
1146                boolean stopKey = pred.isStopKey();
1147                if (startKey || stopKey)
1148                {
1149                    startStopFound = true;
1150
1151                    if ( ! pred.getReferencedMap().hasSingleBitSet())
1152                    {
1153                        constantStartStop = false;
1154                    }
1155
1156                    boolean knownConstant =
1157                        pred.compareWithKnownConstant(this, true);
1158                    if (startKey)
1159                    {
1160                        if (knownConstant && ( ! startGap ) )
1161                        {
1162                            startKeyNum++;
1163                            if (unknownPredicateList != null)
1164                                unknownPredicateList.removeOptPredicate(pred);
1165                        }
1166                        else
1167                        {
1168                            startGap = true;
1169                        }
1170                    }
1171
1172                    if (stopKey)
1173                    {
1174                        if (knownConstant && ( ! stopGap ) )
1175                        {
1176                            stopKeyNum++;
1177                            if (unknownPredicateList != null)
1178                                unknownPredicateList.removeOptPredicate(pred);
1179                        }
1180                        else
1181                        {
1182                            stopGap = true;
1183                        }
1184                    }
1185
1186                    /* If we are seeing either startGap or stopGap because a start/stop key is
1187                     * a comparison with a non-constant, we should multiply the selectivity into
1188                     * extraFirstColumnSelectivity. Beetle 4787.
1189                     */

1190                    if (startGap || stopGap)
1191                    {
1192                        // Don't include redundant join predicates in selectivity calculations
1193                        if (baseTableRestrictionList.isRedundantPredicate(i))
1194                            continue;
1195
1196                        if (startKey && stopKey)
1197                            startStopPredCount++;
1198
1199                        if (pred.getIndexPosition() == 0)
1200                        {
1201                            extraFirstColumnSelectivity *=
1202                                                        pred.selectivity(this);
1203                            if (! seenFirstColumn)
1204                            {
1205                                ValueNode relNode = ((Predicate) pred).getAndNode().getLeftOperand();
1206                                if (relNode instanceof BinaryRelationalOperatorNode)
1207                                    firstColumn = ((BinaryRelationalOperatorNode) relNode).getColumnOperand(this);
1208                                seenFirstColumn = true;
1209                            }
1210                        }
1211                        else
1212                        {
1213                            extraStartStopSelectivity *= pred.selectivity(this);
1214                            numExtraStartStopPreds++;
1215                        }
1216                    }
1217                }
1218                else
1219                {
1220                    // Don't include redundant join predicates in selectivity calculations
1221                    if (baseTableRestrictionList.isRedundantPredicate(i))
1222                    {
1223                        continue;
1224                    }
1225
1226                    /* If we have a "like" predicate on the first index column, it is more likely
1227                     * to have a smaller range than "between", so we apply an extra selectivity of
1228                     * 0.2 here. Beetle 4387, 4787.
1229                     */

1230                    if (pred instanceof Predicate)
1231                    {
1232                        ValueNode leftOpnd = ((Predicate) pred).getAndNode().getLeftOperand();
1233                        if (firstColumn != null && leftOpnd instanceof LikeEscapeOperatorNode)
1234                        {
1235                            LikeEscapeOperatorNode likeNode = (LikeEscapeOperatorNode) leftOpnd;
1236                            if (likeNode.getLeftOperand().requiresTypeFromContext())
1237                            {
1238                                ValueNode receiver = ((TernaryOperatorNode) likeNode).getReceiver();
1239                                if (receiver instanceof ColumnReference)
1240                                {
1241                                    ColumnReference cr = (ColumnReference) receiver;
1242                                    if (cr.getTableNumber() == firstColumn.getTableNumber() &&
1243                                        cr.getColumnNumber() == firstColumn.getColumnNumber())
1244                                        extraFirstColumnSelectivity *= 0.2;
1245                                }
1246                            }
1247                        }
1248                    }
1249
1250                    if (pred.isQualifier())
1251                    {
1252                        extraQualifierSelectivity *= pred.selectivity(this);
1253                        numExtraQualifiers++;
1254                    }
1255                    else
1256                    {
1257                        extraNonQualifierSelectivity *= pred.selectivity(this);
1258                        numExtraNonQualifiers++;
1259                    }
1260
1261                    /*
1262                    ** Strictly speaking, it shouldn't be necessary to
1263                    ** indicate a gap here, since there should be no more
1264                    ** start/stop predicates, but let's do it, anyway.
1265                    */

1266                    startGap = true;
1267                    stopGap = true;
1268                }
1269            }
1270
1271            if (unknownPredicateList != null)
1272            {
1273                statCompositeSelectivity = unknownPredicateList.selectivity(this);
1274                if (statCompositeSelectivity == -1.0d)
1275                    statCompositeSelectivity = 1.0d;
1276            }
1277
1278            if (seenFirstColumn && statisticsForConglomerate &&
1279                (startStopPredCount > 0))
1280            {
1281                statStartStopSelectivity =
1282                    tableDescriptor.selectivityForConglomerate(cd, startStopPredCount);
1283            }
1284
1285            /*
1286            ** Factor the non-base-table predicates into the extra
1287            ** non-qualifier selectivity, since these will restrict the
1288            ** number of rows, but not the cost.
1289            */

1290            extraNonQualifierSelectivity *=
1291                currentJoinStrategy.nonBasePredicateSelectivity(this, predList);
1292
1293            /* Create the start and stop key arrays, and fill them in */
1294            DataValueDescriptor[] startKeys;
1295            DataValueDescriptor[] stopKeys;
1296
1297            if (startKeyNum > 0)
1298                startKeys = new DataValueDescriptor[startKeyNum];
1299            else
1300                startKeys = null;
1301
1302            if (stopKeyNum > 0)
1303                stopKeys = new DataValueDescriptor[stopKeyNum];
1304            else
1305                stopKeys = null;
1306
1307            startKeyNum = 0;
1308            stopKeyNum = 0;
1309            startGap = false;
1310            stopGap = false;
1311
1312            for (int i = 0; i < predListSize; i++)
1313            {
1314                pred = baseTableRestrictionList.getOptPredicate(i);
1315                boolean startKey = pred.isStartKey();
1316                boolean stopKey = pred.isStopKey();
1317
1318                if (startKey || stopKey)
1319                {
1320                    boolean knownConstant = pred.compareWithKnownConstant(this, true);
1321
1322                    if (startKey)
1323                    {
1324                        if (knownConstant && ( ! startGap ) )
1325                        {
1326                            startKeys[startKeyNum] = pred.getCompareValue(this);
1327                            startKeyNum++;
1328                        }
1329                        else
1330                        {
1331                            startGap = true;
1332                        }
1333                    }
1334
1335                    if (stopKey)
1336                    {
1337                        if (knownConstant && ( ! stopGap ) )
1338                        {
1339                            stopKeys[stopKeyNum] = pred.getCompareValue(this);
1340                            stopKeyNum++;
1341                        }
1342                        else
1343                        {
1344                            stopGap = true;
1345                        }
1346                    }
1347                }
1348                else
1349                {
1350                    startGap = true;
1351                    stopGap = true;
1352                }
1353            }
1354
1355            int startOperator;
1356            int stopOperator;
1357
1358            if (baseTableRestrictionList != null)
1359            {
1360                startOperator = baseTableRestrictionList.startOperator(this);
1361                stopOperator = baseTableRestrictionList.stopOperator(this);
1362            }
1363            else
1364            {
1365                /*
1366                ** If we're doing a full scan, it doesn't matter what the
1367                ** start and stop operators are.
1368                */

1369                startOperator = ScanController.NA;
1370                stopOperator = ScanController.NA;
1371            }
1372
1373            /*
1374            ** Get a row template for this conglomerate. For now, just tell
1375            ** it we are using all the columns in the row.
1376            */

1377            DataValueDescriptor[] rowTemplate =
1378                getRowTemplate(cd, getBaseCostController());
1379
1380            /* We prefer an index over a table scan for concurrency reasons, via a small
1381             * adjustment to the estimated row count. This affects the optimizer's decision
1382             * especially when few rows are in the table. beetle 5006. This makes sense
1383             * since the plan may stay around for a long time before we actually check and
1384             * invalidate it, and new rows may be inserted before we check and invalidate it.
1385             * Here we only prefer an index that has a start/stop key from predicates. The
1386             * non-constant start/stop key case is taken care of by selectivity later.
1387             */

1388            long baseRC = (startKeys != null || stopKeys != null) ? baseRowCount() : baseRowCount() + 5;
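            /* Illustrative note (editorial addition, not in the original Derby
             * source; the numbers are assumed): with baseRowCount() == 100, an
             * access path that supplies a start or stop key is costed against
             * 100 rows, while one without any key is costed against 105 rows,
             * so otherwise-equal plans tip slightly toward the index.
             */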
1389
1390            scc.getScanCost(
1391                    currentJoinStrategy.scanCostType(),
1392                    baseRC,
1393                    1,
1394                    forUpdate(),
1395                    (FormatableBitSet) null,
1396                    rowTemplate,
1397                    startKeys,
1398                    startOperator,
1399                    stopKeys,
1400                    stopOperator,
1401                    false,
1402                    0,
1403                    costEstimate);
1404
1405            /* initialPositionCost is the first part of the index scan cost we get above.
1406             * It's the cost of initial positioning/fetch of key. So it's unrelated to
1407             * row count of how many rows we fetch from index. We extract it here so that
1408             * we only multiply selectivity to the other part of index scan cost, which is
1409             * nearly linear, to make cost calculation more accurate and fair, especially
1410             * compared to the plan of "one row result set" (unique index). beetle 4787.
1411             */

1412            double initialPositionCost = 0.0;
1413            if (cd.isIndex())
1414            {
1415                initialPositionCost = scc.getFetchFromFullKeyCost((FormatableBitSet) null, 0);
1416                /* oneRowResultSetForSomeConglom means there's a unique index, but certainly
1417                 * not this one, since we are here. If the store knows this non-unique index
1418                 * won't return any row or returns just one row (e.g., the predicate is a
1419                 * comparison with a constant, or the table is almost empty), we make a minor
1420                 * adjustment to the cost (affecting the decision for a covering index) and to
1421                 * the rc (the decision for non-covering), so as to favor the unique index. beetle 5006.
1422                 */

1423                if (oneRowResultSetForSomeConglom && costEstimate.rowCount() <= 1)
1424                {
1425                    costEstimate.setCost(costEstimate.getEstimatedCost() * 2,
1426                                         costEstimate.rowCount() + 2,
1427                                         costEstimate.singleScanRowCount() + 2);
1428                }
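                /* Illustrative note (editorial addition, not in the original
                 * Derby source; the numbers are assumed): if this non-unique
                 * index was costed at 4.0 with a row count of 1, the adjustment
                 * above re-costs it as 8.0 with a row count of 3, so a genuinely
                 * unique index elsewhere wins the comparison.
                 */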
1429            }
1430
1431            optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN1,
1432                            tableNumber, 0, 0.0, cd);
1433            optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN2,
1434                            tableNumber, 0, 0.0, costEstimate);
1435            optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN3,
1436                            numExtraFirstColumnPreds, 0,
1437                            extraFirstColumnSelectivity, null);
1438            optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN4,
1439                            numExtraStartStopPreds, 0,
1440                            extraStartStopSelectivity, null);
1441            optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN7,
1442                            startStopPredCount, 0,
1443                            statStartStopSelectivity, null);
1444            optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN5,
1445                            numExtraQualifiers, 0,
1446                            extraQualifierSelectivity, null);
1447            optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN6,
1448                            numExtraNonQualifiers, 0,
1449                            extraNonQualifierSelectivity, null);
1450
1451            /* initial row count is the row count without applying
1452               any predicates-- we use this at the end of the routine
1453               when we use statistics to recompute the row count.
1454            */

1455            double initialRowCount = costEstimate.rowCount();
1456
1457            if (statStartStopSelectivity != 1.0d)
1458            {
1459                /*
1460                ** If statistics exist use the selectivity computed
1461                ** from the statistics to calculate the cost.
1462                ** NOTE: we apply this selectivity to the cost as well
1463                ** as both the row counts. In the absence of statistics
1464                ** we only applied the FirstColumnSelectivity to the
1465                ** cost.
1466                */

1467                costEstimate.setCost(
1468                             scanCostAfterSelectivity(costEstimate.getEstimatedCost(),
1469                                                      initialPositionCost,
1470                                                      statStartStopSelectivity,
1471                                                      oneRowResultSetForSomeConglom),
1472                             costEstimate.rowCount() * statStartStopSelectivity,
1473                             costEstimate.singleScanRowCount() *
1474                             statStartStopSelectivity);
1475                optimizer.trace(Optimizer.COST_INCLUDING_STATS_FOR_INDEX,
1476                                tableNumber, 0, 0.0, costEstimate);
1477
1478            }
1479            else
1480            {
1481                /*
1482                ** Factor in the extra selectivity on the first column
1483                ** of the conglomerate (see comment above).
1484                ** NOTE: In this case we want to apply the selectivity to both
1485                ** the total row count and singleScanRowCount.
1486                */

1487                if (extraFirstColumnSelectivity != 1.0d)
1488                {
1489                    costEstimate.setCost(
1490                         scanCostAfterSelectivity(costEstimate.getEstimatedCost(),
1491                                                  initialPositionCost,
1492                                                  extraFirstColumnSelectivity,
1493                                                  oneRowResultSetForSomeConglom),
1494                         costEstimate.rowCount() * extraFirstColumnSelectivity,
1495                         costEstimate.singleScanRowCount() * extraFirstColumnSelectivity);
1496                    
1497                    optimizer.trace(Optimizer.COST_INCLUDING_EXTRA_1ST_COL_SELECTIVITY,
1498                                    tableNumber, 0, 0.0, costEstimate);
1499                }
1500
1501                /* Factor in the extra start/stop selectivity (see comment above).
1502                 * NOTE: In this case we want to apply the selectivity to both
1503                 * the row count and singleScanRowCount.
1504                 */

1505                if (extraStartStopSelectivity != 1.0d)
1506                {
1507                    costEstimate.setCost(
1508                        costEstimate.getEstimatedCost(),
1509                        costEstimate.rowCount() * extraStartStopSelectivity,
1510                        costEstimate.singleScanRowCount() * extraStartStopSelectivity);
1511
1512                    optimizer.trace(Optimizer.COST_INCLUDING_EXTRA_START_STOP,
1513                                    tableNumber, 0, 0.0, costEstimate);
1514                }
1515            }
1516
1517            /*
1518            ** Figure out whether to do row locking or table locking.
1519            **
1520            ** If there are no start/stop predicates, we're doing full
1521            ** conglomerate scans, so do table locking.
1522            */

1523            if (! startStopFound)
1524            {
1525                currentAccessPath.setLockMode(
1526                                            TransactionController.MODE_TABLE);
1527
1528                optimizer.trace(Optimizer.TABLE_LOCK_NO_START_STOP,
1529                                0, 0, 0.0, null);
1530            }
1531            else
1532            {
1533                /*
1534                ** Figure out the number of rows touched. If all the
1535                ** start/stop predicates are constant, the number of
1536                ** rows touched is the number of rows per scan.
1537                ** This is also true for join strategies that scan the
1538                ** inner table only once (like hash join) - we can
1539                ** tell if we have one of those, because
1540                ** multiplyBaseCostByOuterRows() will return false.
1541                */

1542                double rowsTouched = costEstimate.rowCount();
1543
1544                if ( (! constantStartStop) &&
1545                     currentJoinStrategy.multiplyBaseCostByOuterRows())
1546                {
1547                    /*
1548                    ** This is a join where the inner table is scanned
1549                    ** more than once, so we have to take the number
1550                    ** of outer rows into account. The formula for this
1551                    ** works out as follows:
1552                    **
1553                    ** total rows in table = r
1554                    ** number of rows touched per scan = s
1555                    ** number of outer rows = o
1556                    ** proportion of rows touched per scan = s / r
1557                    ** proportion of rows not touched per scan =
1558                    ** 1 - (s / r)
1559                    ** proportion of rows not touched for all scans =
1560                    ** (1 - (s / r)) ** o
1561                    ** proportion of rows touched for all scans =
1562                    ** 1 - ((1 - (s / r)) ** o)
1563                    ** total rows touched for all scans =
1564                    ** r * (1 - ((1 - (s / r)) ** o))
1565                    **
1566                    ** In doing these calculations, we must be careful not
1567                    ** to divide by zero. This could happen if there are
1568                    ** no rows in the table. In this case, let's do table
1569                    ** locking.
1570                    */
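                    /* Illustrative note (editorial addition, not in the
                     * original Derby source; the numbers are assumed): with
                     * r = 1000, s = 10 rows touched per scan and o = 50 outer
                     * rows, the proportion not touched per scan is 0.99, so
                     * the rows touched for all scans come to roughly
                     * 1000 * (1 - 0.99^50), i.e. about 395 rows, which is what
                     * the code below computes before the lock-threshold check
                     * in setLockingBasedOnThreshold().
                     */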

1571                    double r = baseRowCount();
1572                    if (r > 0.0)
1573                    {
1574                        double s = costEstimate.rowCount();
1575                        double o = outerCost.rowCount();
1576                        double pRowsNotTouchedPerScan = 1.0 - (s / r);
1577                        double pRowsNotTouchedAllScans =
1578                                        Math.pow(pRowsNotTouchedPerScan, o);
1579                        double pRowsTouchedAllScans =
1580                                        1.0 - pRowsNotTouchedAllScans;
1581                        double rowsTouchedAllScans =
1582                                        r * pRowsTouchedAllScans;
1583
1584                        rowsTouched = rowsTouchedAllScans;
1585                    }
1586                    else
1587                    {
1588                        /* See comments in setLockingBasedOnThreshold */
1589                        rowsTouched = optimizer.tableLockThreshold() + 1;
1590                    }
1591                }
1592
1593                setLockingBasedOnThreshold(optimizer, rowsTouched);
1594            }
1595
1596            /*
1597            ** If the index isn't covering, add the cost of getting the
1598            ** base row. Only apply extraFirstColumnSelectivity and extraStartStopSelectivity
1599            ** before we do this, don't apply extraQualifierSelectivity etc. The
1600            ** reason is that the row count here should be the number of index rows
1601            ** (and hence heap rows) we get, and we need to fetch all those rows, even
1602            ** though later on some of them may be filtered out by other predicates.
1603            ** beetle 4787.
1604            */

1605            if (cd.isIndex() && ( ! isCoveringIndex(cd) ) )
1606            {
1607                double singleFetchCost =
1608                        getBaseCostController().getFetchFromRowLocationCost(
1609                                                                (FormatableBitSet) null,
1610                                                                0);
1611
1612                cost = singleFetchCost * costEstimate.rowCount();
1613
1614                costEstimate.setEstimatedCost(
1615                                costEstimate.getEstimatedCost() + cost);
1616
1617                optimizer.trace(Optimizer.COST_OF_NONCOVERING_INDEX,
1618                                tableNumber, 0, 0.0, costEstimate);
1619            }
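            /* Illustrative note (editorial addition, not in the original Derby
             * source; the numbers are assumed): with a per-row fetch cost of
             * 0.5 and an estimated 200 index rows, 100 units are added to the
             * scan cost above, which is why a non-covering index loses to a
             * covering one unless it filters rows much more effectively.
             */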
1620
1621            /* Factor in the extra qualifier selectivity (see comment above).
1622             * NOTE: In this case we want to apply the selectivity to both
1623             * the row count and singleScanRowCount.
1624             */

1625            if (extraQualifierSelectivity != 1.0d)
1626            {
1627                costEstimate.setCost(
1628                        costEstimate.getEstimatedCost(),
1629                        costEstimate.rowCount() * extraQualifierSelectivity,
1630                        costEstimate.singleScanRowCount() * extraQualifierSelectivity);
1631
1632                optimizer.trace(Optimizer.COST_INCLUDING_EXTRA_QUALIFIER_SELECTIVITY,
1633                                tableNumber, 0, 0.0, costEstimate);
1634            }
1635
1636            singleScanRowCount = costEstimate.singleScanRowCount();
1637
1638            /*
1639            ** Let the join strategy decide whether the cost of the base
1640            ** scan is a single scan, or a scan per outer row.
1641            ** NOTE: In this case we only want to multiply against the
1642            ** total row count, not the singleScanRowCount.
1643            ** NOTE: Do not multiply row count if we determined that
1644            ** conglomerate is a 1 row result set when costing nested
1645            ** loop. (eg, we will find at most 1 match when probing
1646            ** the hash table.)
1647            */

1648            double newCost = costEstimate.getEstimatedCost();
1649            double rowCount = costEstimate.rowCount();
1650
1651            /*
1652            ** RESOLVE - If there is a unique index on the joining
1653            ** columns, the number of matching rows will equal the
1654            ** number of outer rows, even if we're not considering the
1655            ** unique index for this access path. To figure that out,
1656            ** however, would require an analysis phase at the beginning
1657            ** of optimization. So, we'll always multiply the number
1658            ** of outer rows by the number of rows per scan. This will
1659            ** give us a higher than actual row count when there is
1660            ** such a unique index, which will bias the optimizer toward
1661            ** using the unique index. This is probably OK most of the
1662            ** time, since the optimizer would probably choose the
1663            ** unique index, anyway. But it would be better if the
1664            ** optimizer set the row count properly in this case.
1665            */

1666            if (currentJoinStrategy.multiplyBaseCostByOuterRows())
1667            {
1668                newCost *= outerCost.rowCount();
1669            }
1670
1671            rowCount *= outerCost.rowCount();
1672            initialRowCount *= outerCost.rowCount();
1673
1674
1675            /*
1676            ** If this table can generate at most one row per scan,
1677            ** the maximum row count is the number of outer rows.
1678            ** NOTE: This does not completely take care of the RESOLVE
1679            ** in the above comment, since it will only notice
1680            ** one-row result sets for the current join order.
1681            */

1682            if (oneRowResultSetForSomeConglom)
1683            {
1684                if (outerCost.rowCount() < rowCount)
1685                {
1686                    rowCount = outerCost.rowCount();
1687                }
1688            }
1689
1690            /*
1691            ** The estimated cost may be too high for indexes, if the
1692            ** estimated row count exceeds the maximum. Only do this
1693            ** if we're not doing a full scan, and the start/stop position
1694            ** is not constant (i.e. we're doing a join on the first column
1695            ** of the index) - the reason being that this is when the
1696            ** cost may be inaccurate.
1697            */

1698            if (cd.isIndex() && startStopFound && ( ! constantStartStop ) )
1699            {
1700                /*
1701                ** Does any table outer to this one have a unique key on
1702                ** a subset of the joining columns? If so, the maximum number
1703                ** of rows that this table can return is the number of rows
1704                ** in this table times the number of times the maximum number
1705                ** of times each key can be repeated.
1706                */

1707                double scanUniquenessFactor =
1708                  optimizer.uniqueJoinWithOuterTable(baseTableRestrictionList);
1709                if (scanUniquenessFactor > 0.0)
1710                {
1711                    /*
1712                    ** A positive uniqueness factor means there is a unique
1713                    ** outer join key. The value is the reciprocal of the
1714                    ** maximum number of duplicates for each unique key
1715                    ** (the duplicates can be caused by other joining tables).
1716                    */

1717                    double maxRows =
1718                            ((double) baseRowCount()) / scanUniquenessFactor;
1719                    if (rowCount > maxRows)
1720                    {
1721                        /*
1722                        ** The estimated row count is too high. Adjust the
1723                        ** estimated cost downwards proportionately to
1724                        ** match the maximum number of rows.
1725                        */

1726                        newCost *= (maxRows / rowCount);
1727                    }
1728                }
1729            }
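            /* Illustrative note (editorial addition, not in the original Derby
             * source; the numbers are assumed): a uniqueness factor of 0.25 (at
             * most 4 duplicates per outer key) with baseRowCount() == 1000 caps
             * maxRows at 4000; if the join had estimated 10000 rows, the cost
             * above is scaled by 4000/10000 = 0.4.
             */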
1730
1731            /* The estimated total row count may be too high */
1732            if (tableUniquenessFactor > 0.0)
1733            {
1734                /*
1735                ** A positive uniqueness factor means there is a unique outer
1736                ** join key. The value is the reciprocal of the maximum number
1737                ** of duplicates for each unique key (the duplicates can be
1738                ** caused by other joining tables).
1739                */

1740                double maxRows =
1741                            ((double) baseRowCount()) / tableUniquenessFactor;
1742                if (rowCount > maxRows)
1743                {
1744                    /*
1745                    ** The estimated row count is too high. Set it to the
1746                    ** maximum row count.
1747                    */

1748                    rowCount = maxRows;
1749                }
1750            }
1751
1752            costEstimate.setCost(
1753                newCost,
1754                rowCount,
1755                costEstimate.singleScanRowCount());
1756
1757
1758            optimizer.trace(Optimizer.COST_OF_N_SCANS,
1759                            tableNumber, 0, outerCost.rowCount(), costEstimate);
1760
1761            /*
1762            ** Now figure in the cost of the non-qualifier predicates.
1763            ** existsBaseTables have a row count of 1
1764            */

1765            double rc = -1, src = -1;
1766            if (existsBaseTable)
1767                rc = src = 1;
1768            // don't factor in extraNonQualifierSelectivity in case of oneRowResultSetForSomeConglom
1769            // because "1" is the final result and the effect of other predicates already considered
1770            // beetle 4787
1771            else if (extraNonQualifierSelectivity != 1.0d)
1772            {
1773                rc = oneRowResultSetForSomeConglom ? costEstimate.rowCount() :
1774                                            costEstimate.rowCount() * extraNonQualifierSelectivity;
1775                src = costEstimate.singleScanRowCount() * extraNonQualifierSelectivity;
1776            }
1777            if (rc != -1) // changed
1778            {
1779                costEstimate.setCost(costEstimate.getEstimatedCost(), rc, src);
1780                optimizer.trace(Optimizer.COST_INCLUDING_EXTRA_NONQUALIFIER_SELECTIVITY,
1781                                tableNumber, 0, 0.0, costEstimate);
1782            }
1783            
1784        recomputeRowCount:
1785            if (statisticsForTable && !oneRowResultSetForSomeConglom &&
1786                (statCompositeSelectivity != 1.0d))
1787            {
1788                /* if we have statistics we should use statistics to calculate
1789                   row count-- if it has been determined that this table
1790                   returns one row for some conglomerate then there is no need
1791                   to do this recalculation
1792                */

1793
1794                double compositeStatRC = initialRowCount * statCompositeSelectivity;
1795                optimizer.trace(Optimizer.COMPOSITE_SEL_FROM_STATS,
1796                                0, 0, statCompositeSelectivity, null);
1797
1798
1799                if (tableUniquenessFactor > 0.0)
1800                {
1801                    /* If the row count from the composite statistics
1802                       comes up more than what the table uniqueness
1803                       factor indicates then lets stick with the current
1804                       row count.
1805                    */

1806                    if (compositeStatRC > (baseRowCount() *
1807                                           tableUniquenessFactor))
1808                        
1809                    {
1810                        
1811                        break recomputeRowCount;
1812                    }
1813                }
1814                
1815                /* set the row count and the single scan row count
1816                   to the initialRowCount. initialRowCount is the product
1817                   of the RC from store * RC of the outerCost.
1818                   Thus RC = initialRowCount * the selectivity from stats.
1819                   SingleRC = RC / outerCost.rowCount().
1820                */

1821                costEstimate.setCost(costEstimate.getEstimatedCost(),
1822                                     compositeStatRC,
1823                                     (existsBaseTable) ?
1824                                     1 :
1825                                     compositeStatRC / outerCost.rowCount());
1826                
1827                optimizer.trace(Optimizer.COST_INCLUDING_COMPOSITE_SEL_FROM_STATS,
1828                                tableNumber, 0, 0.0, costEstimate);
1829            }
1830        }
1831
1832        /* Put the base predicates back in the predicate list */
1833        currentJoinStrategy.putBasePredicates(predList,
1834                                       baseTableRestrictionList);
1835        return costEstimate;
1836    }
1837
1838    private double scanCostAfterSelectivity(double originalScanCost,
1839                                            double initialPositionCost,
1840                                            double selectivity,
1841                                            boolean anotherIndexUnique)
1842            throws StandardException
1843    {
1844        /* If there's another plan using a unique index, its selectivity is 1/r
1845         * because we use a row count of 1. This plan does not use a unique index, so
1846         * we make its selectivity at least 2/r, which is fairer, because for a unique
1847         * index we don't use our selectivity estimates. A unique index is also more
1848         * likely to lock fewer rows, hence better concurrency. beetle 4787.
1849         */
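        /* Illustrative note (editorial addition, not in the original Derby
         * source; the numbers are assumed): with baseRowCount() == 1000 the
         * floor below is 2/1000 = 0.002, so a computed selectivity of 0.0005
         * is raised to 0.002 before it is applied to the scan cost.
         */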

1850        if (anotherIndexUnique)
1851        {
1852            double r = baseRowCount();
1853            if (r > 0.0)
1854            {
1855                double minSelectivity = 2.0 / r;
1856                if (minSelectivity > selectivity)
1857                    selectivity = minSelectivity;
1858            }
1859        }
1860        
1861        /* initialPositionCost is the first part of the index scan cost we get above.
1862         * It's the cost of initial positioning/fetch of key. So it's unrelated to
1863         * row count of how many rows we fetch from index. We extract it here so that
1864         * we only multiply selectivity to the other part of index scan cost, which is
1865         * nearly linear, to make cost calculation more accurate and fair, especially
1866         * compared to the plan of "one row result set" (unique index). beetle 4787.
1867         */
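        /* Illustrative note (editorial addition, not in the original Derby
         * source; the numbers are assumed): with an original scan cost of 10,
         * an initial positioning cost of 2 and a selectivity of 0.1, the value
         * returned below is 2 + (10 - 2) * 0.1 = 2.8 rather than 10 * 0.1 = 1,
         * so the fixed positioning work is never scaled away.
         */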

1868        double afterInitialCost = (originalScanCost - initialPositionCost) *
1869                                selectivity;
1870        if (afterInitialCost < 0)
1871            afterInitialCost = 0;
1872        return initialPositionCost + afterInitialCost;
1873    }
1874
1875    private void setLockingBasedOnThreshold(
1876                    Optimizer optimizer, double rowsTouched)
1877    {
1878        /* In the optimizer we always set it to row locking (unless no
1879         * start/stop key was found to utilize an index, in which case we do table
1880         * locking); it's up to the store to upgrade it to a table lock. This makes
1881         * sense for the default read-committed isolation level and update
1882         * locks. For more detail, see Beetle 4133.
1883         */

1884        getCurrentAccessPath().setLockMode(
1885                                    TransactionController.MODE_RECORD);
1886    }
1887
1888    /** @see Optimizable#isBaseTable */
1889    public boolean isBaseTable()
1890    {
1891        return true;
1892    }
1893
1894    /** @see Optimizable#forUpdate */
1895    public boolean forUpdate()
1896    {
1897        /* This table is updatable if it is the
1898         * target table of an update or delete,
1899         * or it is (or was) the target table of an
1900         * updatable cursor.
1901         */

1902        return (updateOrDelete != 0) || cursorTargetTable || getUpdateLocks;
1903    }
1904
1905    /** @see Optimizable#initialCapacity */
1906    public int initialCapacity()
1907    {
1908        return initialCapacity;
1909    }
1910
1911    /** @see Optimizable#loadFactor */
1912    public float loadFactor()
1913    {
1914        return loadFactor;
1915    }
1916
1917    /**
1918     * @see Optimizable#memoryUsageOK
1919     */

1920    public boolean memoryUsageOK(double rowCount, int maxMemoryPerTable)
1921            throws StandardException
1922    {
1923        return super.memoryUsageOK(singleScanRowCount, maxMemoryPerTable);
1924    }
1925
1926    /**
1927     * @see Optimizable#isTargetTable
1928     */

1929    public boolean isTargetTable()
1930    {
1931        return (updateOrDelete != 0);
1932    }
1933
1934    /**
1935     * @see Optimizable#uniqueJoin
1936     */

1937    public double uniqueJoin(OptimizablePredicateList predList)
1938                    throws StandardException
1939    {
1940        double retval = -1.0;
1941        PredicateList pl = (PredicateList) predList;
1942        int numColumns = getTableDescriptor().getNumberOfColumns();
1943        int tableNumber = getTableNumber();
1944
1945        // This is supposed to be an array of table numbers for the current
1946        // query block. It is used to determine whether a join is with a
1947        // correlation column, to fill in eqOuterCols properly. We don't care
1948        // about eqOuterCols, so just create a zero-length array, pretending
1949        // that all columns are correlation columns.
1950        int[] tableNumbers = new int[0];
1951        JBitSet[] tableColMap = new JBitSet[1];
1952        tableColMap[0] = new JBitSet(numColumns + 1);
1953
1954        pl.checkTopPredicatesForEqualsConditions(tableNumber,
1955                                                null,
1956                                                tableNumbers,
1957                                                tableColMap,
1958                                                false);
1959
1960        if (supersetOfUniqueIndex(tableColMap))
1961        {
1962            retval =
1963                getBestAccessPath().getCostEstimate().singleScanRowCount();
1964        }
1965
1966        return retval;
1967    }
1968
1969    /**
1970     * @see Optimizable#isOneRowScan
1971     *
1972     * @exception StandardException Thrown on error
1973     */

1974    public boolean isOneRowScan()
1975        throws StandardException
1976    {
1977        /* EXISTS FBT will never be a 1 row scan.
1978         * Otherwise call method in super class.
1979         */

1980        if (existsBaseTable)
1981        {
1982            return false;
1983        }
1984        
1985
1986        return super.isOneRowScan();
1987    }
1988
1989    /**
1990     * @see Optimizable#legalJoinOrder
1991     */

1992    public boolean legalJoinOrder(JBitSet assignedTableMap)
1993    {
1994        // Only an issue for EXISTS FBTs
1995        if (existsBaseTable)
1996        {
1997            /* Have all of our dependencies been satisfied? */
1998            return assignedTableMap.contains(dependencyMap);
1999        }
2000        return true;
2001    }
2002
2003    /**
2004     * Convert this object to a String. See comments in QueryTreeNode.java
2005     * for how this should be done for tree printing.
2006     *
2007     * @return This object as a String
2008     */

2009
2010    public String toString()
2011    {
2012        if (SanityManager.DEBUG)
2013        {
2014            return "tableName: " +
2015                (tableName != null ? tableName.toString() : "null") + "\n" +
2016                "tableDescriptor: " + tableDescriptor + "\n" +
2017                "updateOrDelete: " + updateOrDelete + "\n" +
2018                (tableProperties != null ?
2019                    tableProperties.toString() : "null") + "\n" +
2020                "existsBaseTable: " + existsBaseTable + "\n" +
2021                "dependencyMap: " +
2022                (dependencyMap != null
2023                        ? dependencyMap.toString()
2024                        : "null") + "\n" +
2025                super.toString();
2026        }
2027        else
2028        {
2029            return "";
2030        }
2031    }
2032
2033    /**
2034     * Does this FBT represent an EXISTS FBT.
2035     *
2036     * @return Whether or not this FBT represents
2037     * an EXISTS FBT.
2038     */

2039    boolean getExistsBaseTable()
2040    {
2041        return existsBaseTable;
2042    }
2043
2044    /**
2045     * Set whether or not this FBT represents an
2046     * EXISTS FBT.
2047     *
2048     * @param existsBaseTable Whether or not an EXISTS FBT.
2049     * @param dependencyMap The dependency map for the EXISTS FBT.
2050     * @param isNotExists Whether or not for NOT EXISTS, more specifically.
2051     */

2052    void setExistsBaseTable(boolean existsBaseTable, JBitSet dependencyMap, boolean isNotExists)
2053    {
2054        this.existsBaseTable = existsBaseTable;
2055        this.isNotExists = isNotExists;
2056
2057        /* Set/clear the dependency map as needed */
2058        if (existsBaseTable)
2059        {
2060            this.dependencyMap = dependencyMap;
2061        }
2062        else
2063        {
2064            this.dependencyMap = null;
2065        }
2066    }
2067
2068    /**
2069     * Clear the bits from the dependency map when join nodes are flattened
2070     *
2071     * @param locations vector of bit numbers to be cleared
2072     */

2073    void clearDependency(Vector locations)
2074    {
2075        if (this.dependencyMap != null)
2076        {
2077            for (int i = 0; i < locations.size() ; i++)
2078                this.dependencyMap.clear(((Integer)locations.elementAt(i)).intValue());
2079        }
2080    }
2081
2082    /**
2083     * Set the table properties for this table.
2084     *
2085     * @param tableProperties The new table properties.
2086     */

2087    public void setTableProperties(Properties tableProperties)
2088    {
2089        this.tableProperties = tableProperties;
2090    }
2091
2092    /**
2093     * Bind the table in this FromBaseTable.
2094     * This is where view resolution occurs
2095     *
2096     * @param dataDictionary The DataDictionary to use for binding
2097     * @param fromListParam FromList to use/append to.
2098     *
2099     * @return ResultSetNode The FromTable for the table or resolved view.
2100     *
2101     * @exception StandardException Thrown on error
2102     */

2103
2104    public ResultSetNode bindNonVTITables(DataDictionary dataDictionary,
2105                           FromList fromListParam)
2106                    throws StandardException
2107    {
2108        TableDescriptor tableDescriptor = bindTableDescriptor();
2109
2110        if (tableDescriptor.getTableType() == TableDescriptor.VTI_TYPE) {
2111            ResultSetNode vtiNode = getNodeFactory().mapTableAsVTI(
2112                    tableDescriptor,
2113                    dataDictionary.getVTIClass(tableDescriptor),
2114                    getCorrelationName(),
2115                    resultColumns,
2116                    getProperties(),
2117                    getContextManager());
2118            return vtiNode.bindNonVTITables(dataDictionary, fromListParam);
2119        }
2120        
2121        ResultColumnList derivedRCL = resultColumns;
2122  
2123        // make sure there's a restriction list
2124        restrictionList = (PredicateList) getNodeFactory().getNode(
2125                                            C_NodeTypes.PREDICATE_LIST,
2126                                            getContextManager());
2127        baseTableRestrictionList = (PredicateList) getNodeFactory().getNode(
2128                                            C_NodeTypes.PREDICATE_LIST,
2129                                            getContextManager());
2130
2131
2132        CompilerContext compilerContext = getCompilerContext();
2133
2134        /* Generate the ResultColumnList */
2135        resultColumns = genResultColList();
2136        templateColumns = resultColumns;
2137
2138        /* Resolve the view, if this is a view */
2139        if (tableDescriptor.getTableType() == TableDescriptor.VIEW_TYPE)
2140        {
2141            FromTable fsq;
2142            ResultSetNode rsn;
2143            ViewDescriptor vd;
2144            CreateViewNode cvn;
2145            SchemaDescriptor compSchema;
2146            SchemaDescriptor prevCompSchema;
2147
2148            /* Get the associated ViewDescriptor so that we can get
2149             * the view definition text.
2150             */

2151            vd = dataDictionary.getViewDescriptor(tableDescriptor);
2152
2153            /*
2154            ** Set the default compilation schema to be whatever
2155            ** schema this view was originally compiled against.
2156            ** That way we pick up the same tables no matter what
2157            ** schema we are running against.
2158            */

2159            compSchema = dataDictionary.getSchemaDescriptor(vd.getCompSchemaId(), null);
2160
2161            prevCompSchema = compilerContext.setCompilationSchema(compSchema);
2162    
2163            try
2164            {
2165        
2166                /* This represents a view - query is dependent on the ViewDescriptor */
2167                compilerContext.createDependency(vd);
2168    
2169                if (SanityManager.DEBUG)
2170                {
2171                    SanityManager.ASSERT(vd != null,
2172                        "vd not expected to be null for " + tableName);
2173                }
2174    
2175                /*
2176                ** Push a compiler context to parse the query text so that
2177                ** it won't clobber the current context.
2178                */

2179                LanguageConnectionContext lcc = getLanguageConnectionContext();
2180                CompilerContext newCC = lcc.pushCompilerContext();
2181                cvn = (CreateViewNode)
2182                    QueryTreeNode.parseQueryText(
2183                        newCC,
2184                        vd.getViewText(),
2185                        (DataValueDescriptor[])null, // default params
2186                        lcc);
2187
2188                lcc.popCompilerContext(newCC);
2189
2190                rsn = cvn.getParsedQueryExpression();
2191    
2192                /* If the view contains a '*' then we mark the views derived column list
2193                 * so that the view will still work, and return the expected results,
2194                 * if any of the tables referenced in the view have columns added to
2195                 * them via ALTER TABLE. The expected results means that the view
2196                 * will always return the same # of columns.
2197                 */

2198                if (rsn.getResultColumns().containsAllResultColumn())
2199                {
2200                    resultColumns.setCountMismatchAllowed(true);
2201                }
2202                //Views execute with definer's privileges and if any one of
2203                //those privileges is revoked from the definer, the view gets
2204                //dropped. So, a view can exist in Derby only if its owner has
2205                //all the privileges needed to create one. In order to do a
2206                //select from a view, a user only needs select privilege on the
2207                //view and doesn't need any privilege for objects accessed by
2208                //the view. Hence, when collecting privilege requirement for a
2209                //sql accessing a view, we only need to look for select privilege
2210                //on the actual view and that is what the following code is
2211                //checking.
2212                for (int i = 0; i < resultColumns.size(); i++) {
2213                    ResultColumn rc = (ResultColumn) resultColumns.elementAt(i);
2214                    if (rc.isPrivilegeCollectionRequired())
2215                        compilerContext.addRequiredColumnPriv( rc.getTableColumnDescriptor());
2216                }
2217
2218                fsq = (FromTable) getNodeFactory().getNode(
2219                    C_NodeTypes.FROM_SUBQUERY,
2220                    rsn,
2221                    (correlationName != null) ?
2222                        correlationName : getOrigTableName().getTableName(),
2223                    resultColumns,
2224                    tableProperties,
2225                    getContextManager());
2226                // Transfer the nesting level to the new FromSubquery
2227                fsq.setLevel(level);
2228                //We are getting ready to bind the query underneath the view. Since
2229                //that query is going to run with definer's privileges, we do not
2230                //need to collect any privilege requirement for that query.
2231                //Following call is marking the query to run with definer
2232                //privileges. This marking will make sure that we do not collect
2233                //any privilege requirement for it.
2234                fsq.disablePrivilegeCollection();
2235                fsq.setOrigTableName(this.getOrigTableName());
2236                return fsq.bindNonVTITables(dataDictionary, fromListParam);
2237            }
2238            finally
2239            {
2240                compilerContext.setCompilationSchema(prevCompSchema);
2241            }
2242        }
2243        else
2244        {
2245            /* This represents a table - query is dependent on the TableDescriptor */
2246            compilerContext.createDependency(tableDescriptor);
2247
2248            /* Get the base conglomerate descriptor */
2249            baseConglomerateDescriptor =
2250                tableDescriptor.getConglomerateDescriptor(
2251                    tableDescriptor.getHeapConglomerateId()
2252                    );
2253
2254            /* Build the 0-based array of base column names. */
2255            columnNames = resultColumns.getColumnNames();
2256
2257            /* Do error checking on derived column list and update "exposed"
2258             * column names if valid.
2259             */

2260            if (derivedRCL != null)
2261            {
2262                 resultColumns.propagateDCLInfo(derivedRCL,
2263                                                origTableName.getFullTableName());
2264            }
2265
2266            /* Assign the tableNumber */
2267            if (tableNumber == -1) // allow re-bind, in which case use old number
2268                tableNumber = compilerContext.getNextTableNumber();
2269        }
2270
2271        return this;
2272    }
2273
2274    /**
2275     * Determine whether or not the specified name is an exposed name in
2276     * the current query block.
2277     *
2278     * @param name The specified name to search for as an exposed name.
2279     * @param schemaName Schema name, if non-null.
2280     * @param exactMatch Whether or not we need an exact match on specified schema and table
2281     * names or match on table id.
2282     *
2283     * @return The FromTable, if any, with the exposed name.
2284     *
2285     * @exception StandardException Thrown on error
2286     */

2287    protected FromTable getFromTableByName(String name, String schemaName, boolean exactMatch)
2288        throws StandardException
2289    {
2290        // ourSchemaName can be null if correlation name is specified.
2291        String ourSchemaName = getOrigTableName().getSchemaName();
2292        String fullName = (schemaName != null) ? (schemaName + '.' + name) : name;
2293
2294        /* If an exact string match is required then:
2295         * o If schema name specified on 1 but not both then no match.
2296         * o If schema name not specified on either, compare exposed names.
2297         * o If schema name specified on both, compare schema and exposed names.
2298         */

2299        if (exactMatch)
2300        {
2301
2302            if ((schemaName != null && ourSchemaName == null) ||
2303                (schemaName == null && ourSchemaName != null))
2304            {
2305                return null;
2306            }
2307
2308            if (getExposedName().equals(fullName))
2309            {
2310                return this;
2311            }
2312
2313            return null;
2314        }
2315
2316        /* If an exact string match is not required then:
2317         * o If schema name specified on both, compare schema and exposed names.
2318         * o If schema name not specified on either, compare exposed names.
2319         * o If schema name specified on column but not table, then compare
2320         * the column's schema name against the schema name from the TableDescriptor.
2321         * If they agree, then the column's table name must match the exposed name
2322         * from the table, which must also be the base table name, since a correlation
2323         * name does not belong to a schema.
2324         * o If schema name not specified on column then just match the exposed names.
2325         */

2326        // Both or neither schema name specified
2327        if (getExposedName().equals(fullName))
2328        {
2329            return this;
2330        }
2331        else if ((schemaName != null && ourSchemaName != null) ||
2332                 (schemaName == null && ourSchemaName == null))
2333        {
2334            return null;
2335        }
2336
2337        // Schema name only on column
2338        // e.g.: select w1.i from t1 w1 order by test2.w1.i; (incorrect)
2339        if (schemaName != null && ourSchemaName == null)
2340        {
2341            // Compare column's schema name with table descriptor's if it is
2342            // not a synonym since a synonym can be declared in a different
2343            // schema.
2344            if (tableName.equals(origTableName) &&
2345                    ! schemaName.equals(tableDescriptor.getSchemaDescriptor().getSchemaName()))
2346            {
2347                return null;
2348            }
2349
2350            // Compare exposed name with column's table name
2351            if (! getExposedName().equals(name))
2352            {
2353                return null;
2354            }
2355
2356            // Make sure exposed name is not a correlation name
2357            if (! getExposedName().equals(getOrigTableName().getTableName()))
2358            {
2359                return null;
2360            }
2361
2362            return this;
2363        }
2364
2365        /* Schema name only specified on table. Compare full exposed name
2366         * against table's schema name || "." || column's table name.
2367         */

2368        if (! getExposedName().equals(getOrigTableName().getSchemaName() + "." + name))
2369        {
2370            return null;
2371        }
2372
2373        return this;
2374    }
2375
2376
2377    /**
2378      * Bind the table descriptor for this table.
2379      *
2380      * If the tableName is a synonym, it will be resolved here.
2381      * The original table name is retained in origTableName.
2382      *
2383      * @exception StandardException Thrown on error
2384      */

2385    private TableDescriptor bindTableDescriptor()
2386        throws StandardException
2387    {
2388        String schemaName = tableName.getSchemaName();
2389        SchemaDescriptor sd = getSchemaDescriptor(schemaName);
2390
2391        tableDescriptor = getTableDescriptor(tableName.getTableName(), sd);
2392        if (tableDescriptor == null)
2393        {
2394            // Check if the reference is for a synonym.
2395            TableName synonymTab = resolveTableToSynonym(tableName);
2396            if (synonymTab == null)
2397                throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND, tableName);
2398            
2399            tableName = synonymTab;
2400            sd = getSchemaDescriptor(tableName.getSchemaName());
2401
2402            tableDescriptor = getTableDescriptor(synonymTab.getTableName(), sd);
2403            if (tableDescriptor == null)
2404                throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND, tableName);
2405        }
2406
2407        return tableDescriptor;
2408    }
2409
2410
2411    /**
2412     * Bind the expressions in this FromBaseTable. This means binding the
2413     * sub-expressions, as well as figuring out what the return type is for
2414     * each expression.
2415     *
2416     * @param fromListParam FromList to use/append to.
2417     *
2418     * @exception StandardException Thrown on error
2419     */

2420    public void bindExpressions(FromList fromListParam)
2421                    throws StandardException
2422    {
2423        /* No expressions to bind for a FromBaseTable.
2424         * NOTE - too involved to optimize so that this method
2425         * doesn't get called, so just do nothing.
2426         */

2427    }
2428
2429    /**
2430     * Bind the result columns of this ResultSetNode when there is no
2431     * base table to bind them to. This is useful for SELECT statements,
2432     * where the result columns get their types from the expressions that
2433     * live under them.
2434     *
2435     * @param fromListParam FromList to use/append to.
2436     *
2437     * @exception StandardException Thrown on error
2438     */

2439
2440    public void bindResultColumns(FromList fromListParam)
2441                throws StandardException
2442    {
2443        /* Nothing to do, since RCL bound in bindNonVTITables() */
2444    }
2445
2446    /**
2447     * Try to find a ResultColumn in the table represented by this FromBaseTable
2448     * that matches the name in the given ColumnReference.
2449     *
2450     * @param columnReference The columnReference whose name we're looking
2451     * for in the given table.
2452     *
2453     * @return A ResultColumn whose expression is the ColumnNode
2454     * that matches the ColumnReference.
2455     * Returns null if there is no match.
2456     *
2457     * @exception StandardException Thrown on error
2458     */

2459
2460    public ResultColumn getMatchingColumn(ColumnReference columnReference) throws StandardException
2461    {
2462        ResultColumn resultColumn = null;
2463        TableName columnsTableName;
2464        TableName exposedTableName;
2465
2466        columnsTableName = columnReference.getTableNameNode();
2467
2468        if(columnsTableName != null) {
2469            if(columnsTableName.getSchemaName() == null && correlationName == null)
2470                columnsTableName.bind(this.getDataDictionary());
2471        }
2472        /*
2473        ** If there is a correlation name, use that instead of the
2474        ** table name.
2475        */

2476        exposedTableName = getExposedTableName();
2477
2478        if(exposedTableName.getSchemaName() == null && correlationName == null)
2479            exposedTableName.bind(this.getDataDictionary());
2480        /*
2481        ** If the column did not specify a name, or the specified name
2482        ** matches the table we're looking at, see whether the column
2483        ** is in this table.
2484        */

2485        if (columnsTableName == null || columnsTableName.equals(exposedTableName))
2486        {
2487            resultColumn = resultColumns.getResultColumn(columnReference.getColumnName());
2488            /* Did we find a match? */
2489            if (resultColumn != null)
2490            {
2491                columnReference.setTableNumber(tableNumber);
2492                if (tableDescriptor != null)
2493                {
2494                    FormatableBitSet referencedColumnMap = tableDescriptor.getReferencedColumnMap();
2495                    if (referencedColumnMap == null)
2496                        referencedColumnMap = new FormatableBitSet(
2497                                    tableDescriptor.getNumberOfColumns() + 1);
2498                    referencedColumnMap.set(resultColumn.getColumnPosition());
2499                    tableDescriptor.setReferencedColumnMap(referencedColumnMap);
2500                }
2501            }
2502        }
2503
2504        return resultColumn;
2505    }
2506
2507    /**
2508     * Preprocess a ResultSetNode - this currently means:
2509     * o Generating a referenced table map for each ResultSetNode.
2510     * o Putting the WHERE and HAVING clauses in conjunctive normal form (CNF).
2511     * o Converting the WHERE and HAVING clauses into PredicateLists and
2512     * classifying them.
2513     * o Ensuring that a ProjectRestrictNode is generated on top of every
2514     * FromBaseTable and generated in place of every FromSubquery.
2515     * o Pushing single table predicates down to the new ProjectRestrictNodes.
2516     *
2517     * @param numTables The number of tables in the DML Statement
2518     * @param gbl The group by list, if any
2519     * @param fromList The from list, if any
2520     *
2521     * @return ResultSetNode at top of preprocessed tree.
2522     *
2523     * @exception StandardException Thrown on error
2524     */

2525
2526    public ResultSetNode preprocess(int numTables,
2527                                    GroupByList gbl,
2528                                    FromList fromList)
2529                                throws StandardException
2530    {
2531        /* Generate the referenced table map */
2532        referencedTableMap = new JBitSet(numTables);
2533        referencedTableMap.set(tableNumber);
2534
2535        return genProjectRestrict(numTables);
2536    }
2537
2538    /**
2539     * Put a ProjectRestrictNode on top of each FromTable in the FromList.
2540     * ColumnReferences must continue to point to the same ResultColumn, so
2541     * that ResultColumn must percolate up to the new PRN. However,
2542     * that ResultColumn will point to a new expression, a VirtualColumnNode,
2543     * which points to the FromTable and the ResultColumn that is the source for
2544     * the ColumnReference.
2545     * (The new PRN will have the original of the ResultColumnList and
2546     * the ResultColumns from that list. The FromTable will get shallow copies
2547     * of the ResultColumnList and its ResultColumns. ResultColumn.expression
2548     * will remain at the FromTable, with the PRN getting a new
2549     * VirtualColumnNode for each ResultColumn.expression.)
2550     * We then project out the non-referenced columns. If there are no referenced
2551     * columns, then the PRN's ResultColumnList will consist of a single ResultColumn
2552     * whose expression is 1.
2553     *
2554     * @param numTables Number of tables in the DML Statement
2555     *
2556     * @return The generated ProjectRestrictNode atop the original FromTable.
2557     *
2558     * @exception StandardException Thrown on error
2559     */

2560
2561    protected ResultSetNode genProjectRestrict(int numTables)
2562                throws StandardException
2563    {
2564        /* We get a shallow copy of the ResultColumnList and its
2565         * ResultColumns. (Copy maintains ResultColumn.expression for now.)
2566         */

2567        ResultColumnList prRCList = resultColumns;
2568        resultColumns = resultColumns.copyListAndObjects();
2569
2570        /* Replace ResultColumn.expression with new VirtualColumnNodes
2571         * in the ProjectRestrictNode's ResultColumnList. (VirtualColumnNodes include
2572         * pointers to source ResultSetNode, this, and source ResultColumn.)
2573         * NOTE: We don't want to mark the underlying RCs as referenced, otherwise
2574         * we won't be able to project out any of them.
2575         */

2576        prRCList.genVirtualColumnNodes(this, resultColumns, false);
2577
2578        /* Project out any unreferenced columns. If there are no referenced
2579         * columns, generate and bind a single ResultColumn whose expression is 1.
2580         */

2581        prRCList.doProjection();
2582
2583        /* Finally, we create the new ProjectRestrictNode */
2584        return (ResultSetNode) getNodeFactory().getNode(
2585                                C_NodeTypes.PROJECT_RESTRICT_NODE,
2586                                this,
2587                                prRCList,
2588                                null, /* Restriction */
2589                                null, /* Restriction as PredicateList */
2590                                null, /* Project subquery list */
2591                                null, /* Restrict subquery list */
2592                                null,
2593                                getContextManager() );
2594    }
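
    /* Illustrative sketch, not part of the original Derby source: a minimal
     * picture of the rewiring genProjectRestrict() above describes. The local
     * classes are hypothetical stand-ins for Derby's ResultColumn and
     * VirtualColumnNode; the point is only that the original ResultColumn
     * percolates up to the ProjectRestrictNode and is re-pointed at a virtual
     * column, while the FromBaseTable keeps a shallow copy that still owns
     * the real expression.
     */
    private static void exampleProjectRestrictWiring()
    {
        class ExampleColumn { Object expression; }            // stand-in for ResultColumn
        class ExampleVirtualColumn { ExampleColumn source; }  // stand-in for VirtualColumnNode

        ExampleColumn prnColumn = new ExampleColumn();        // percolates up to the new PRN
        ExampleColumn tableCopy = new ExampleColumn();        // shallow copy kept by the FromBaseTable
        tableCopy.expression = "base column expression";      // the real expression stays below

        ExampleVirtualColumn vcn = new ExampleVirtualColumn();
        vcn.source = tableCopy;                                // VCN points down at the table's copy
        prnColumn.expression = vcn;                            // the PRN's column now references the VCN
    }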
2595
2596    /**
2597     * @see ResultSetNode#changeAccessPath
2598     *
2599     * @exception StandardException Thrown on error
2600     */

2601    public ResultSetNode changeAccessPath() throws StandardException
2602    {
2603        ResultSetNode retval;
2604        AccessPath ap = getTrulyTheBestAccessPath();
2605        ConglomerateDescriptor trulyTheBestConglomerateDescriptor =
2606                                                ap.getConglomerateDescriptor();
2607        JoinStrategy trulyTheBestJoinStrategy = ap.getJoinStrategy();
2608        Optimizer optimizer = ap.getOptimizer();
2609
2610        optimizer.trace(Optimizer.CHANGING_ACCESS_PATH_FOR_TABLE,
2611                        tableNumber, 0, 0.0, null);
2612
2613        if (SanityManager.DEBUG)
2614        {
2615            SanityManager.ASSERT(
2616                trulyTheBestConglomerateDescriptor != null,
2617                "Should only modify access path after conglomerate has been chosen.");
2618        }
2619
2620        /*
2621        ** Make sure user-specified bulk fetch is OK with the chosen join
2622        ** strategy.
2623        */

2624        if (bulkFetch != UNSET)
2625        {
2626            if ( ! trulyTheBestJoinStrategy.bulkFetchOK())
2627            {
2628                throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_WITH_JOIN_TYPE,
2629                                            trulyTheBestJoinStrategy.getName());
2630            }
2631            // bulkFetch has no meaning for hash join, just ignore it
2632            else if (trulyTheBestJoinStrategy.ignoreBulkFetch())
2633            {
2634                disableBulkFetch();
2635            }
2636            // bug 4431 - ignore bulkfetch property if it's 1 row resultset
2637            else if (isOneRowResultSet())
2638            {
2639                disableBulkFetch();
2640            }
2641        }
2642
2643        // bulkFetch = 1 is the same as no bulk fetch
2644        if (bulkFetch == 1)
2645        {
2646            disableBulkFetch();
2647        }
2648
2649        /* Remove any redundant join clauses. A redundant join clause is one
2650         * where there are other join clauses in the same equivalence class
2651         * after it in the PredicateList.
2652         */

2653        restrictionList.removeRedundantPredicates();
2654
2655        /*
2656        ** Divide up the predicates for different processing phases of the
2657        ** best join strategy.
2658        */

2659        storeRestrictionList = (PredicateList) getNodeFactory().getNode(
2660                                                    C_NodeTypes.PREDICATE_LIST,
2661                                                    getContextManager());
2662        nonStoreRestrictionList = (PredicateList) getNodeFactory().getNode(
2663                                                    C_NodeTypes.PREDICATE_LIST,
2664                                                    getContextManager());
2665        requalificationRestrictionList =
2666                                    (PredicateList) getNodeFactory().getNode(
2667                                                    C_NodeTypes.PREDICATE_LIST,
2668                                                    getContextManager());
2669        trulyTheBestJoinStrategy.divideUpPredicateLists(
2670                                            this,
2671                                            restrictionList,
2672                                            storeRestrictionList,
2673                                            nonStoreRestrictionList,
2674                                            requalificationRestrictionList,
2675                                            getDataDictionary());
2676
2677        /*
2678        ** Consider turning on bulkFetch if it is turned
2679        ** off. Only turn it on if it is not an updatable
2680        ** scan and if it isn't a oneRowResultSet, and
2681        ** not a subquery, and it is OK to use bulk fetch
2682        ** with the chosen join strategy. NOTE: the subquery logic
2683        ** could be more sophisticated -- we are taking
2684        ** the safe route in avoiding reading extra
2685        ** data for something like:
2686        **
2687        ** select x from t where x in (select y from t)
2688        **
2689        ** In this case we want to stop the subquery
2690        ** evaluation as soon as something matches.
2691        */

2692        if (trulyTheBestJoinStrategy.bulkFetchOK() &&
2693            !(trulyTheBestJoinStrategy.ignoreBulkFetch()) &&
2694            ! bulkFetchTurnedOff &&
2695            (bulkFetch == UNSET) &&
2696            !forUpdate() &&
2697            !isOneRowResultSet() &&
2698            getLevel() == 0)
2699        {
2700            bulkFetch = getDefaultBulkFetch();
2701        }
2702
2703        /* Statement is dependent on the chosen conglomerate. */
2704        getCompilerContext().createDependency(
2705                trulyTheBestConglomerateDescriptor);
2706
2707        /* No need to modify access path if conglomerate is the heap */
2708        if ( ! trulyTheBestConglomerateDescriptor.isIndex())
2709        {
2710            /*
2711            ** We need a little special logic for SYSSTATEMENTS
2712            ** here. SYSSTATEMENTS has a hidden column at the
2713            ** end. When someone does a select * we don't want
2714            ** to get that column from the store. So we'll always
2715            ** generate a partial read bitSet if we are scanning
2716            ** SYSSTATEMENTS to ensure we don't get the hidden
2717            ** column.
2718            */

2719            boolean isSysstatements = tableName.equals("SYS","SYSSTATEMENTS");
2720            /* Template must reflect full row.
2721             * Compact RCL down to partial row.
2722             */

2723            templateColumns = resultColumns;
2724            referencedCols = resultColumns.getReferencedFormatableBitSet(cursorTargetTable, isSysstatements, false);
2725            resultColumns = resultColumns.compactColumns(cursorTargetTable, isSysstatements);
2726            return this;
2727        }
2728        
2729        /* No need to go to the data page if this is a covering index */
2730        /* Derby-1087: use data page when returning an updatable resultset */
2731        if (ap.getCoveringIndexScan() && (!cursorTargetTable()))
2732        {
2733            /* Massage resultColumns so that it matches the index. */
2734            resultColumns = newResultColumns(resultColumns,
2735                                             trulyTheBestConglomerateDescriptor,
2736                                             baseConglomerateDescriptor,
2737                                             false);
2738
2739            /* We are going against the index. The template row must be the full index row.
2740             * The template row will have the RID but the result row will not
2741             * since there is no need to go to the data page.
2742             */

2743            templateColumns = newResultColumns(resultColumns,
2744                                             trulyTheBestConglomerateDescriptor,
2745                                             baseConglomerateDescriptor,
2746                                             false);
2747            templateColumns.addRCForRID();
2748
2749            // If this is for update then we need to get the RID in the result row
2750            if (forUpdate())
2751            {
2752                resultColumns.addRCForRID();
2753            }
2754            
2755            /* Compact RCL down to the partial row. We always want a new
2756             * RCL and FormatableBitSet because this is a covering index. (This is
2757             * because we don't want the RID in the partial row returned
2758             * by the store.)
2759             */

2760            referencedCols = resultColumns.getReferencedFormatableBitSet(cursorTargetTable,true, false);
2761            resultColumns = resultColumns.compactColumns(cursorTargetTable,true);
2762
2763            resultColumns.setIndexRow(
2764                baseConglomerateDescriptor.getConglomerateNumber(),
2765                forUpdate());
2766
2767            return this;
2768        }
2769
2770        /* Statement is dependent on the base conglomerate if this is
2771         * a non-covering index.
2772         */

2773        getCompilerContext().createDependency(baseConglomerateDescriptor);
2774
2775        /*
2776        ** On bulkFetch, we need to add the restrictions from
2777        ** the TableScan and reapply them here.
2778        */

2779        if (bulkFetch != UNSET)
2780        {
2781            restrictionList.copyPredicatesToOtherList(
2782                                                requalificationRestrictionList);
2783        }
2784
2785        /*
2786        ** We know the chosen conglomerate is an index. We need to allocate
2787        ** an IndexToBaseRowNode above us, and to change the result column
2788        ** list for this FromBaseTable to reflect the columns in the index.
2789        ** We also need to shift "cursor target table" status from this
2790        ** FromBaseTable to the new IndexToBaseRowNode (because that's where
2791        ** a cursor can fetch the current row).
2792        */

2793        ResultColumnList newResultColumns =
2794            newResultColumns(resultColumns,
2795                            trulyTheBestConglomerateDescriptor,
2796                            baseConglomerateDescriptor,
2797                            true
2798                            );
2799
2800        /* Compact the RCL for the IndexToBaseRowNode down to
2801         * the partial row for the heap. The referenced BitSet
2802         * will reflect only those columns coming from the heap.
2803         * (ie, it won't reflect columns coming from the index.)
2804         * NOTE: We need to re-get all of the columns from the heap
2805         * when doing a bulk fetch because we will be requalifying
2806         * the row in the IndexRowToBaseRow.
2807         */

2808        // Get the BitSet for all of the referenced columns
2809        FormatableBitSet indexReferencedCols = null;
2810        FormatableBitSet heapReferencedCols = null;
2811        if ((bulkFetch == UNSET) &&
2812            (requalificationRestrictionList == null ||
2813             requalificationRestrictionList.size() == 0))
2814        {
2815            /* No BULK FETCH or requalification, XOR off the columns coming from the heap
2816             * to get the columns coming from the index.
2817             */

2818            indexReferencedCols = resultColumns.getReferencedFormatableBitSet(cursorTargetTable, true, false);
2819            heapReferencedCols = resultColumns.getReferencedFormatableBitSet(cursorTargetTable, true, true);
2820            if (heapReferencedCols != null)
2821            {
2822                indexReferencedCols.xor(heapReferencedCols);
2823            }
2824        }
2825        else
2826        {
2827            // BULK FETCH or requalification - re-get all referenced columns from the heap
2828            heapReferencedCols = resultColumns.getReferencedFormatableBitSet(cursorTargetTable, true, false);
2829        }
2830        ResultColumnList heapRCL = resultColumns.compactColumns(cursorTargetTable, false);
2831        retval = (ResultSetNode) getNodeFactory().getNode(
2832                                        C_NodeTypes.INDEX_TO_BASE_ROW_NODE,
2833                                        this,
2834                                        baseConglomerateDescriptor,
2835                                        heapRCL,
2836                                        new Boolean(cursorTargetTable),
2837                                        heapReferencedCols,
2838                                        indexReferencedCols,
2839                                        requalificationRestrictionList,
2840                                        new Boolean(forUpdate()),
2841                                        tableProperties,
2842                                        getContextManager());
2843
2844        /*
2845        ** The template row is all the columns. The
2846        ** result set is the compacted column list.
2847        */

2848        resultColumns = newResultColumns;
2849
2850        templateColumns = newResultColumns(resultColumns,
2851                                           trulyTheBestConglomerateDescriptor,
2852                                           baseConglomerateDescriptor,
2853                                           false);
2854        /* Since we are doing a non-covered index scan, if bulkFetch is on, then
2855         * the only columns that we need to get are those columns referenced in the start and stop positions
2856         * and the qualifiers (and the RID) because we will need to re-get all of the other
2857         * columns from the heap anyway.
2858         * At this point in time, columns referenced anywhere in the column tree are
2859         * marked as being referenced. So, we clear all of the references, walk the
2860         * predicate list and remark the columns referenced from there and then add
2861         * the RID before compacting the columns.
2862         */

2863        if (bulkFetch != UNSET)
2864        {
2865            resultColumns.markAllUnreferenced();
2866            storeRestrictionList.markReferencedColumns();
2867            if (nonStoreRestrictionList != null)
2868            {
2869                nonStoreRestrictionList.markReferencedColumns();
2870            }
2871        }
2872        resultColumns.addRCForRID();
2873        templateColumns.addRCForRID();
2874
2875        // Compact the RCL for the index scan down to the partial row.
2876        referencedCols = resultColumns.getReferencedFormatableBitSet(cursorTargetTable, false, false);
2877        resultColumns = resultColumns.compactColumns(cursorTargetTable, false);
2878        resultColumns.setIndexRow(
2879                baseConglomerateDescriptor.getConglomerateNumber(),
2880                forUpdate());
2881
2882        /* We must remember if this was the cursorTargetTable
2883         * in order to get the right locking on the scan.
2884         */

2885        getUpdateLocks = cursorTargetTable;
2886        cursorTargetTable = false;
2887
2888        return retval;
2889    }
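
    /* Illustrative sketch, not part of the original Derby source: for a
     * non-covering index scan without bulk fetch, changeAccessPath() above
     * takes the full set of referenced columns, determines which of them must
     * come from the heap, and XORs the two bit sets so that what remains is
     * exactly the set of columns satisfied by the index alone.
     * java.util.BitSet stands in for FormatableBitSet purely for this example.
     */
    private static java.util.BitSet exampleIndexOnlyColumns(
            java.util.BitSet allReferencedCols,
            java.util.BitSet heapReferencedCols)
    {
        java.util.BitSet indexOnly = (java.util.BitSet) allReferencedCols.clone();
        // heapReferencedCols is a subset of allReferencedCols, so XOR removes
        // the heap columns and leaves only the index columns.
        indexOnly.xor(heapReferencedCols);
        return indexOnly;
    }
    // Example: allReferencedCols = {1, 2, 4}, heapReferencedCols = {2}
    //          => exampleIndexOnlyColumns(...) = {1, 4}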
2890
2891    /**
2892     * Create a new ResultColumnList to reflect the columns in the
2893     * index described by the given ConglomerateDescriptor. The columns
2894     * in the new ResultColumnList are based on the columns in the given
2895     * ResultColumnList, which reflects the columns in the base table.
2896     *
2897     * @param oldColumns The original list of columns, which reflects
2898     * the columns in the base table.
2899     * @param idxCD The ConglomerateDescriptor, which describes
2900     * the index that the new ResultColumnList will
2901     * reflect.
2902     * @param heapCD The ConglomerateDescriptor for the base heap
2903     * @param cloneRCs Whether or not to clone the RCs
2904     *
2905     * @return A new ResultColumnList that reflects the columns in the index.
2906     *
2907     * @exception StandardException Thrown on error
2908     */

2909    private ResultColumnList newResultColumns(
2910                                        ResultColumnList oldColumns,
2911                                        ConglomerateDescriptor idxCD,
2912                                        ConglomerateDescriptor heapCD,
2913                                        boolean cloneRCs)
2914                        throws StandardException
2915    {
2916        IndexRowGenerator irg = idxCD.getIndexDescriptor();
2917        int[] baseCols = irg.baseColumnPositions();
2918        ResultColumnList newCols =
2919                                (ResultColumnList) getNodeFactory().getNode(
2920                                                C_NodeTypes.RESULT_COLUMN_LIST,
2921                                                getContextManager());
2922
2923        for (int i = 0; i < baseCols.length; i++)
2924        {
2925            int basePosition = baseCols[i];
2926            ResultColumn oldCol = oldColumns.getResultColumn(basePosition);
2927            ResultColumn newCol;
2928
2929            if (SanityManager.DEBUG)
2930            {
2931                SanityManager.ASSERT(oldCol != null,
2932                            "Couldn't find base column "+basePosition+
2933                            "\n. RCL is\n"+oldColumns);
2934            }
2935
2936            /* If we're cloning the RCs it's because we are
2937             * building an RCL for the index when doing
2938             * a non-covering index scan. Set the expression
2939             * for the old RC to be a VCN pointing to the
2940             * new RC.
2941             */

2942            if (cloneRCs)
2943            {
2944                newCol = oldCol.cloneMe();
2945                oldCol.setExpression(
2946                    (ValueNode) getNodeFactory().getNode(
2947                        C_NodeTypes.VIRTUAL_COLUMN_NODE,
2948                        this,
2949                        newCol,
2950                        ReuseFactory.getInteger(oldCol.getVirtualColumnId()),
2951                        getContextManager()));
2952            }
2953            else
2954            {
2955                newCol = oldCol;
2956            }
2957
2958            newCols.addResultColumn(newCol);
2959        }
2960
2961        /*
2962        ** The conglomerate is an index, so we need to generate a RowLocation
2963        ** as the last column of the result set. Notify the ResultColumnList
2964        ** that it needs to do this. Also tell the RCL whether this is
2965        ** the target of an update, so it can tell the conglomerate controller
2966        ** when it is getting the RowLocation template.
2967        */

2968        newCols.setIndexRow(heapCD.getConglomerateNumber(), forUpdate());
2969
2970        return newCols;
2971    }
2972
2973    /**
2974     * Generation on a FromBaseTable creates a scan on the
2975     * optimizer-selected conglomerate.
2976     *
2977     * @param acb The ActivationClassBuilder for the class being built
2978     * @param mb the execute() method to be built
2979     *
2980     * @exception StandardException Thrown on error
2981     */

2982    public void generate(ActivationClassBuilder acb,
2983                                MethodBuilder mb)
2984                            throws StandardException
2985    {
2986        generateResultSet( acb, mb );
2987
2988        /*
2989        ** Remember if this base table is the cursor target table, so we can
2990        ** know which table to use when doing positioned update and delete
2991        */

2992        if (cursorTargetTable)
2993        {
2994            acb.rememberCursorTarget(mb);
2995        }
2996    }
2997
2998    /**
2999     * Generation on a FromBaseTable for a SELECT. This logic was separated
3000     * out so that it could be shared with PREPARE SELECT FILTER.
3001     *
3002     * @param acb The ExpressionClassBuilder for the class being built
3003     * @param mb The execute() method to be built
3004     *
3005     * @exception StandardException Thrown on error
3006     */

3007    public void generateResultSet(ExpressionClassBuilder acb,
3008                                MethodBuilder mb)
3009                            throws StandardException
3010    {
3011        /* We must have a best conglomerate descriptor here */
3012        if (SanityManager.DEBUG)
3013        SanityManager.ASSERT(
3014            getTrulyTheBestAccessPath().getConglomerateDescriptor() != null);
3015
3016        /* Get the next ResultSet #, so that we can number this ResultSetNode, its
3017         * ResultColumnList and ResultSet.
3018         */

3019        assignResultSetNumber();
3020
3021        /*
3022        ** If we are doing a special scan to get the last row
3023        ** of an index, generate it separately.
3024        */

3025        if (specialMaxScan)
3026        {
3027            generateMaxSpecialResultSet(acb, mb);
3028            return;
3029        }
3030
3031        /*
3032        ** If we are doing a special distinct scan, generate
3033        ** it separately.
3034        */

3035        if (distinctScan)
3036        {
3037            generateDistinctScan(acb, mb);
3038            return;
3039        }
3040        
3041        /*
3042         * Referential action dependent table scan, generate it
3043         * separately.
3044         */

3045
3046        if(raDependentScan)
3047        {
3048            generateRefActionDependentTableScan(acb, mb);
3049            return;
3050
3051        }
3052    
3053        JoinStrategy trulyTheBestJoinStrategy =
3054            getTrulyTheBestAccessPath().getJoinStrategy();
3055
3056        // the table scan generator is what we return
3057        acb.pushGetResultSetFactoryExpression(mb);
3058
3059        int nargs = getScanArguments(acb, mb);
3060
3061        mb.callMethod(VMOpcode.INVOKEINTERFACE, (String) null,
3062            trulyTheBestJoinStrategy.resultSetMethodName(bulkFetch != UNSET),
3063            ClassName.NoPutResultSet, nargs);
3064
3065        /* If this table is the target of an update or a delete, then we must
3066         * wrap the Expression up in an assignment expression before
3067         * returning.
3068         * NOTE - scanExpress is a ResultSet. We will need to cast it to the
3069         * appropriate subclass.
3070         * For example, for a DELETE, instead of returning a call to the
3071         * ResultSetFactory, we will generate and return:
3072         * this.SCANRESULTSET = (cast to appropriate ResultSet type)
3073         * The outer cast back to ResultSet is needed so that
3074         * we invoke the appropriate method.
3075         * (call to the ResultSetFactory)
3076         */

3077        if ((updateOrDelete == UPDATE) || (updateOrDelete == DELETE))
3078        {
3079            mb.cast(ClassName.CursorResultSet);
3080            mb.putField(acb.getRowLocationScanResultSetName(), ClassName.CursorResultSet);
3081            mb.cast(ClassName.NoPutResultSet);
3082        }
3083    }
3084
3085    /**
3086     * Get the final CostEstimate for this ResultSetNode.
3087     *
3088     * @return The final CostEstimate for this ResultSetNode.
3089     */

3090    public CostEstimate getFinalCostEstimate()
3091    {
3092        return getTrulyTheBestAccessPath().getCostEstimate();
3093    }
3094    
3095        /* Helper method used by generateMaxSpecialResultSet and
3096         * generateDistinctScan to push the name of the index if the
3097         * conglomerate is an index.
3098         * @param cd Conglomerate for which we need to push the index name
3099         * @param mb Associated MethodBuilder
3100         * @throws StandardException
3101         */

3102        private void pushIndexName(ConglomerateDescriptor cd, MethodBuilder mb)
3103          throws StandardException
3104        {
3105            if (cd.isConstraint()) {
3106                DataDictionary dd = getDataDictionary();
3107                ConstraintDescriptor constraintDesc =
3108                    dd.getConstraintDescriptor(tableDescriptor, cd.getUUID());
3109                mb.push(constraintDesc.getConstraintName());
3110            } else if (cd.isIndex()) {
3111                mb.push(cd.getConglomerateName());
3112            } else {
3113             // If the conglomerate is the base table itself, make sure we push null.
3114                // Before the fix for DERBY-578, we would push the base table name
3115                // and this was just plain wrong and would cause statistics information to be incorrect.
3116                mb.pushNull("java.lang.String");
3117            }
3118        }
3119    
3120        private void generateMaxSpecialResultSet
3121    (
3122        ExpressionClassBuilder acb,
3123        MethodBuilder mb
3124    ) throws StandardException
3125    {
3126        ConglomerateDescriptor cd = getTrulyTheBestAccessPath().getConglomerateDescriptor();
3127        CostEstimate costEstimate = getFinalCostEstimate();
3128        int colRefItem = (referencedCols == null) ?
3129                        -1 :
3130                        acb.addItem(referencedCols);
3131        boolean tableLockGranularity = tableDescriptor.getLockGranularity() == TableDescriptor.TABLE_LOCK_GRANULARITY;
3132    
3133        /*
3134        ** getLastIndexKeyResultSet
3135        ** (
3136        ** activation,
3137        ** resultSetNumber,
3138        ** resultRowAllocator,
3139        ** conglomerateNumber,
3140        ** tableName,
3141        ** optimizerOverride,
3142        ** indexName,
3143        ** colRefItem,
3144        ** lockMode,
3145        ** tableLocked,
3146        ** isolationLevel,
3147        ** optimizerEstimatedRowCount,
3148        ** optimizerEstimatedRowCost,
3149        ** );
3150        */

3151
3152        acb.pushGetResultSetFactoryExpression(mb);
3153
3154        acb.pushThisAsActivation(mb);
3155        mb.push(getResultSetNumber());
3156        resultColumns.generateHolder(acb, mb, referencedCols, (FormatableBitSet) null);
3157        mb.push(cd.getConglomerateNumber());
3158        mb.push(tableDescriptor.getName());
3159        //User may have supplied optimizer overrides in the sql
3160        //Pass them onto execute phase so it can be shown in
3161        //run time statistics.
3162        if (tableProperties != null)
3163            mb.push(org.apache.derby.iapi.util.PropertyUtil.sortProperties(tableProperties));
3164        else
3165            mb.pushNull("java.lang.String");
3166                pushIndexName(cd, mb);
3167        mb.push(colRefItem);
3168        mb.push(getTrulyTheBestAccessPath().getLockMode());
3169        mb.push(tableLockGranularity);
3170        mb.push(getCompilerContext().getScanIsolationLevel());
3171        mb.push(costEstimate.singleScanRowCount());
3172        mb.push(costEstimate.getEstimatedCost());
3173
3174        mb.callMethod(VMOpcode.INVOKEINTERFACE, (String) null, "getLastIndexKeyResultSet",
3175                    ClassName.NoPutResultSet, 13);
3176
3177
3178    }
3179
3180    private void generateDistinctScan
3181    (
3182        ExpressionClassBuilder acb,
3183        MethodBuilder mb
3184    ) throws StandardException
3185    {
3186        ConglomerateDescriptor cd = getTrulyTheBestAccessPath().getConglomerateDescriptor();
3187        CostEstimate costEstimate = getFinalCostEstimate();
3188        int colRefItem = (referencedCols == null) ?
3189                        -1 :
3190                        acb.addItem(referencedCols);
3191        boolean tableLockGranularity = tableDescriptor.getLockGranularity() == TableDescriptor.TABLE_LOCK_GRANULARITY;
3192    
3193        /*
3194        ** getDistinctScanResultSet
3195        ** (
3196        ** activation,
3197        ** resultSetNumber,
3198        ** resultRowAllocator,
3199        ** conglomerateNumber,
3200        ** tableName,
3201        ** optimizerOverride,
3202        ** indexName,
3203        ** colRefItem,
3204        ** lockMode,
3205        ** tableLocked,
3206        ** isolationLevel,
3207        ** optimizerEstimatedRowCount,
3208        ** optimizerEstimatedRowCost,
3209        ** closeCleanupMethod
3210        ** );
3211        */

3212
3213        /* Get the hash key columns and wrap them in a formattable */
3214        int[] hashKeyColumns;
3215
3216        hashKeyColumns = new int[resultColumns.size()];
3217        if (referencedCols == null)
3218        {
3219            for (int index = 0; index < hashKeyColumns.length; index++)
3220            {
3221                hashKeyColumns[index] = index;
3222            }
3223        }
3224        else
3225        {
3226            int index = 0;
3227            for (int colNum = referencedCols.anySetBit();
3228                    colNum != -1;
3229                    colNum = referencedCols.anySetBit(colNum))
3230            {
3231                hashKeyColumns[index++] = colNum;
3232            }
3233        }
3234
3235        FormatableIntHolder[] fihArray =
3236                FormatableIntHolder.getFormatableIntHolders(hashKeyColumns);
3237        FormatableArrayHolder hashKeyHolder = new FormatableArrayHolder(fihArray);
3238        int hashKeyItem = acb.addItem(hashKeyHolder);
3239        long conglomNumber = cd.getConglomerateNumber();
3240        StaticCompiledOpenConglomInfo scoci = getLanguageConnectionContext().
3241                                                getTransactionCompile().
3242                                                    getStaticCompiledConglomInfo(conglomNumber);
3243
3244        acb.pushGetResultSetFactoryExpression(mb);
3245
3246        acb.pushThisAsActivation(mb);
3247        mb.push(conglomNumber);
3248        mb.push(acb.addItem(scoci));
3249        resultColumns.generateHolder(acb, mb, referencedCols, (FormatableBitSet) null);
3250        mb.push(getResultSetNumber());
3251        mb.push(hashKeyItem);
3252        mb.push(tableDescriptor.getName());
3253        //User may have supplied optimizer overrides in the sql
3254        //Pass them onto execute phase so it can be shown in
3255        //run time statistics.
3256        if (tableProperties != null)
3257            mb.push(org.apache.derby.iapi.util.PropertyUtil.sortProperties(tableProperties));
3258        else
3259            mb.pushNull("java.lang.String");
3260        pushIndexName(cd, mb);
3261        mb.push(cd.isConstraint());
3262        mb.push(colRefItem);
3263        mb.push(getTrulyTheBestAccessPath().getLockMode());
3264        mb.push(tableLockGranularity);
3265        mb.push(getCompilerContext().getScanIsolationLevel());
3266        mb.push(costEstimate.singleScanRowCount());
3267        mb.push(costEstimate.getEstimatedCost());
3268        
3269        mb.callMethod(VMOpcode.INVOKEINTERFACE, (String) null, "getDistinctScanResultSet",
3270                            ClassName.NoPutResultSet, 16);
3271    }
3272
3273
3274    /**
3275     * Generation on a FromBaseTable for a referential action dependent table.
3276     *
3277     * @param acb The ExpressionClassBuilder for the class being built
3278     * @param mb The execute() method to be built
3279     *
3280     * @exception StandardException Thrown on error
3281     */

3282
3283    private void generateRefActionDependentTableScan
3284    (
3285        ExpressionClassBuilder acb,
3286        MethodBuilder mb
3287    ) throws StandardException
3288    {
3289
3290        acb.pushGetResultSetFactoryExpression(mb);
3291
3292        //get the parameters required to do a table scan
3293        int nargs = getScanArguments(acb, mb);
3294
3295        //extra parameters required to create a dependent table result set.
3296        mb.push(raParentResultSetId); //id for the parent result set.
3297        mb.push(fkIndexConglomId);
3298        mb.push(acb.addItem(fkColArray));
3299        mb.push(acb.addItem(getDataDictionary().getRowLocationTemplate(
3300                      getLanguageConnectionContext(), tableDescriptor)));
3301
3302        int argCount = nargs + 4;
3303        mb.callMethod(VMOpcode.INVOKEINTERFACE, (String) null, "getRaDependentTableScanResultSet",
3304                            ClassName.NoPutResultSet, argCount);
3305
3306        if ((updateOrDelete == UPDATE) || (updateOrDelete == DELETE))
3307        {
3308            mb.cast(ClassName.CursorResultSet);
3309            mb.putField(acb.getRowLocationScanResultSetName(), ClassName.CursorResultSet);
3310            mb.cast(ClassName.NoPutResultSet);
3311        }
3312
3313    }
3314
3315
3316
3317    private int getScanArguments(ExpressionClassBuilder acb,
3318                                          MethodBuilder mb)
3319        throws StandardException
3320    {
3321        // get a function to allocate scan rows of the right shape and size
3322        MethodBuilder resultRowAllocator =
3323                        resultColumns.generateHolderMethod(acb,
3324                                                    referencedCols,
3325                                                    (FormatableBitSet) null);
3326
3327        // pass in the referenced columns on the saved objects
3328        // chain
3329        int colRefItem = -1;
3330        if (referencedCols != null)
3331        {
3332            colRefItem = acb.addItem(referencedCols);
3333        }
3334
3335        // beetle entry 3865: updateable cursor using index
3336        int indexColItem = -1;
3337        if (cursorTargetTable || getUpdateLocks)
3338        {
3339            ConglomerateDescriptor cd = getTrulyTheBestAccessPath().getConglomerateDescriptor();
3340            if (cd.isIndex())
3341            {
3342                int[] baseColPos = cd.getIndexDescriptor().baseColumnPositions();
3343                boolean[] isAscending = cd.getIndexDescriptor().isAscending();
3344                int[] indexCols = new int[baseColPos.length];
3345                for (int i = 0; i < indexCols.length; i++)
3346                    indexCols[i] = isAscending[i] ? baseColPos[i] : -baseColPos[i];
3347                indexColItem = acb.addItem(indexCols);
3348            }
3349        }
3350
3351        AccessPath ap = getTrulyTheBestAccessPath();
3352        JoinStrategy trulyTheBestJoinStrategy = ap.getJoinStrategy();
3353
3354        /*
3355        ** We can only do bulkFetch on NESTEDLOOP
3356        */

3357        if (SanityManager.DEBUG)
3358        {
3359            if ( ( ! trulyTheBestJoinStrategy.bulkFetchOK()) &&
3360                (bulkFetch != UNSET))
3361            {
3362                SanityManager.THROWASSERT("bulkFetch should not be set "+
3363                                "for the join strategy " +
3364                                trulyTheBestJoinStrategy.getName());
3365            }
3366        }
3367
3368        int nargs = trulyTheBestJoinStrategy.getScanArgs(
3369                                            getLanguageConnectionContext().getTransactionCompile(),
3370                                            mb,
3371                                            this,
3372                                            storeRestrictionList,
3373                                            nonStoreRestrictionList,
3374                                            acb,
3375                                            bulkFetch,
3376                                            resultRowAllocator,
3377                                            colRefItem,
3378                                            indexColItem,
3379                                            getTrulyTheBestAccessPath().
3380                                                                getLockMode(),
3381                                            (tableDescriptor.getLockGranularity() == TableDescriptor.TABLE_LOCK_GRANULARITY),
3382                                            getCompilerContext().getScanIsolationLevel(),
3383                                            ap.getOptimizer().getMaxMemoryPerTable()
3384                                            );
3385
3386        return nargs;
3387    }
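
    /* Illustrative sketch, not part of the original Derby source: for an
     * updatable cursor using an index (beetle 3865, handled in
     * getScanArguments() above), each index column is saved as its base-table
     * column position, negated when the index orders that column descending.
     * A consumer can recover both pieces of information like this.
     */
    private static void exampleDecodeIndexCols(int[] indexCols)
    {
        for (int i = 0; i < indexCols.length; i++)
        {
            int baseColumnPosition = Math.abs(indexCols[i]);  // position in the base table
            boolean ascending = indexCols[i] > 0;             // the sign carries the ordering
            System.out.println("column " + baseColumnPosition +
                               (ascending ? " ASC" : " DESC"));
        }
    }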
3388
3389    /**
3390     * Convert an absolute to a relative 0-based column position.
3391     *
3392     * @param absolutePosition The absolute 0-based column position.
3393     *
3394     * @return The relative 0-based column position.
3395     */

3396    private int mapAbsoluteToRelativeColumnPosition(int absolutePosition)
3397    {
3398        if (referencedCols == null)
3399        {
3400            return absolutePosition;
3401        }
3402
3403        /* setBitCtr counts the # of columns in the row,
3404         * from the leftmost to the absolutePosition, that will be
3405         * in the partial row returned by the store. This becomes
3406         * the new value for column position.
3407         */

3408        int setBitCtr = 0;
3409        int bitCtr = 0;
3410        for ( ;
3411             bitCtr < referencedCols.size() && bitCtr < absolutePosition;
3412             bitCtr++)
3413        {
3414            if (referencedCols.get(bitCtr))
3415            {
3416                setBitCtr++;
3417            }
3418        }
3419        return setBitCtr;
3420    }
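
    /* Illustrative sketch, not part of the original Derby source: the loop in
     * mapAbsoluteToRelativeColumnPosition() above simply counts the referenced
     * columns to the left of the absolute position; that count is the column's
     * position in the partial row returned by the store. java.util.BitSet
     * stands in for FormatableBitSet purely for this example.
     */
    private static int exampleMapAbsoluteToRelative(java.util.BitSet referenced,
                                                    int absolutePosition)
    {
        int setBitCtr = 0;
        for (int bitCtr = 0;
             bitCtr < referenced.length() && bitCtr < absolutePosition;
             bitCtr++)
        {
            if (referenced.get(bitCtr))
            {
                setBitCtr++;
            }
        }
        // With referenced = {0, 2, 5}, absolute position 5 maps to relative
        // position 2: only columns 0 and 2 precede it in the partial row.
        return setBitCtr;
    }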
3421
3422    /**
3423     * Get the exposed name for this table, which is the name that can
3424     * be used to refer to it in the rest of the query.
3425     *
3426     * @return The exposed name of this table.
3427     *
3428     */

3429    public String getExposedName()
3430    {
3431        if (correlationName != null)
3432            return correlationName;
3433        else
3434            return getOrigTableName().getFullTableName();
3435    }
3436    
3437    /**
3438     * Get the exposed table name for this table, which is the name that can
3439     * be used to refer to it in the rest of the query.
3440     *
3441     * @return TableName The exposed name of this table.
3442     *
3443     * @exception StandardException Thrown on error
3444     */

3445    private TableName getExposedTableName() throws StandardException
3446    {
3447        if (correlationName != null)
3448            return makeTableName(null, correlationName);
3449        else
3450            return getOrigTableName();
3451    }
3452    
3453    /**
3454     * Return the table name for this table.
3455     *
3456     * @return The table name for this table.
3457     */

3458
3459    public TableName getTableNameField()
3460    {
3461        return tableName;
3462    }
3463
3464    /**
3465     * Return a ResultColumnList with all of the columns in this table.
3466     * (Used in expanding '*'s.)
3467     * NOTE: Since this method is for expanding a "*" in the SELECT list,
3468     * ResultColumn.expression will be a ColumnReference.
3469     *
3470     * @param allTableName The qualifier on the "*"
3471     *
3472     * @return ResultColumnList List of result columns from this table.
3473     *
3474     * @exception StandardException Thrown on error
3475     */

3476    public ResultColumnList getAllResultColumns(TableName allTableName)
3477            throws StandardException
3478    {
3479        return getResultColumnsForList(allTableName, resultColumns,
3480                getOrigTableName());
3481    }
3482
3483    /**
3484     * Build a ResultColumnList based on all of the columns in this FromBaseTable.
3485     * NOTE - Since the ResultColumnList generated is for the FromBaseTable,
3486     * ResultColumn.expression will be a BaseColumnNode.
3487     *
3488     * @return ResultColumnList representing all referenced columns
3489     *
3490     * @exception StandardException Thrown on error
3491     */

3492    public ResultColumnList genResultColList()
3493            throws StandardException
3494    {
3495        ResultColumnList rcList = null;
3496        ResultColumn resultColumn;
3497        ValueNode valueNode;
3498        ColumnDescriptor colDesc = null;
3499        TableName exposedName;
3500
3501        /* Cache exposed name for this table.
3502         * The exposed name becomes the qualifier for each column
3503         * in the expanded list.
3504         */

3505        exposedName = getExposedTableName();
3506
3507        /* Add all of the columns in the table */
3508        rcList = (ResultColumnList) getNodeFactory().getNode(
3509                                        C_NodeTypes.RESULT_COLUMN_LIST,
3510                                        getContextManager());
3511        ColumnDescriptorList cdl = tableDescriptor.getColumnDescriptorList();
3512        int cdlSize = cdl.size();
3513
3514        for (int index = 0; index < cdlSize; index++)
3515        {
3516            /* Build a ResultColumn/BaseColumnNode pair for the column */
3517            colDesc = (ColumnDescriptor) cdl.elementAt(index);
3518            //A ColumnDescriptor instantiated through SYSCOLUMNSRowFactory only has
3519            //the uuid set on it and no table descriptor set on it. Since we know here
3520            //that this columnDescriptor is tied to tableDescriptor, set it so using
3521            //setTableDescriptor method. ColumnDescriptor's table descriptor is used
3522            //to get ResultSetMetaData.getTableName & ResultSetMetaData.getSchemaName
3523            colDesc.setTableDescriptor(tableDescriptor);
3524
3525            valueNode = (ValueNode) getNodeFactory().getNode(
3526                                            C_NodeTypes.BASE_COLUMN_NODE,
3527                                            colDesc.getColumnName(),
3528                                            exposedName,
3529                                            colDesc.getType(),
3530                                            getContextManager());
3531            resultColumn = (ResultColumn) getNodeFactory().getNode(
3532                                            C_NodeTypes.RESULT_COLUMN,
3533                                            colDesc,
3534                                            valueNode,
3535                                            getContextManager());
3536
3537            /* Build the ResultColumnList to return */
3538            rcList.addResultColumn(resultColumn);
3539        }
3540
3541        return rcList;
3542    }
3543
3544    /**
3545     * Augment the RCL to include the columns in the FormatableBitSet.
3546     * If the column is already there, don't add it twice.
3547     * Column is added as a ResultColumn pointing to a
3548     * ColumnReference.
3549     *
3550     * @param inputRcl The original list
3551     * @param colsWeWant bit set of cols we want
3552     *
3553     * @return ResultColumnList the rcl
3554     *
3555     * @exception StandardException Thrown on error
3556     */

3557    public ResultColumnList addColsToList
3558    (
3559        ResultColumnList inputRcl,
3560        FormatableBitSet colsWeWant
3561    )
3562            throws StandardException
3563    {
3564        ResultColumnList rcList = null;
3565        ResultColumn resultColumn;
3566        ValueNode valueNode;
3567        ColumnDescriptor cd = null;
3568        TableName exposedName;
3569
3570        /* Cache exposed name for this table.
3571         * The exposed name becomes the qualifier for each column
3572         * in the expanded list.
3573         */

3574        exposedName = getExposedTableName();
3575
3576        /* Add all of the columns in the table */
3577        ResultColumnList newRcl = (ResultColumnList) getNodeFactory().getNode(
3578                                                C_NodeTypes.RESULT_COLUMN_LIST,
3579                                                getContextManager());
3580        ColumnDescriptorList cdl = tableDescriptor.getColumnDescriptorList();
3581        int cdlSize = cdl.size();
3582
3583        for (int index = 0; index < cdlSize; index++)
3584        {
3585            /* Build a ResultColumn/BaseColumnNode pair for the column */
3586            cd = (ColumnDescriptor) cdl.elementAt(index);
3587            int position = cd.getPosition();
3588
3589            if (!colsWeWant.get(position))
3590            {
3591                continue;
3592            }
3593
3594            if ((resultColumn = inputRcl.getResultColumn(position)) == null)
3595            {
3596                valueNode = (ValueNode) getNodeFactory().getNode(
3597                                                C_NodeTypes.COLUMN_REFERENCE,
3598                                                cd.getColumnName(),
3599                                                exposedName,
3600                                                getContextManager());
3601                resultColumn = (ResultColumn) getNodeFactory().
3602                                                getNode(
3603                                                    C_NodeTypes.RESULT_COLUMN,
3604                                                    cd,
3605                                                    valueNode,
3606                                                    getContextManager());
3607            }
3608
3609            /* Build the ResultColumnList to return */
3610            newRcl.addResultColumn(resultColumn);
3611        }
3612
3613        return newRcl;
3614    }
3615
3616    /**
3617     * Return a TableName node representing this FromTable.
3618     * @return a TableName node representing this FromTable.
3619     * @exception StandardException Thrown on error
3620     */

3621    public TableName getTableName()
3622            throws StandardException
3623    {
3624        TableName tn;
3625
3626        tn = super.getTableName();
3627
3628        if(tn != null) {
3629            if(tn.getSchemaName() == null &&
3630               correlationName == null)
3631                   tn.bind(this.getDataDictionary());
3632        }
3633
3634        return (tn != null ? tn : tableName);
3635    }
3636
3637    /**
3638        Mark this ResultSetNode as the target table of an updatable
3639        cursor.
3640     */

3641    public boolean markAsCursorTargetTable()
3642    {
3643        cursorTargetTable = true;
3644        return true;
3645    }
3646
3647    /**
3648     * Is this a table that has a FOR UPDATE
3649     * clause?
3650     *
3651     * @return true/false
3652     */

3653    protected boolean cursorTargetTable()
3654    {
3655        return cursorTargetTable;
3656    }
3657
3658    /**
3659     * Mark as updatable all the columns in the result column list of this
3660     * FromBaseTable that match the columns in the given update column list.
3661     *
3662     * @param updateColumns A ResultColumnList representing the columns
3663     * to be updated.
3664     */

3665    void markUpdated(ResultColumnList updateColumns)
3666    {
3667        resultColumns.markUpdated(updateColumns);
3668    }
3669
3670    /**
3671     * Search to see if a query references the specified table name.
3672     *
3673     * @param name Table name (String) to search for.
3674     * @param baseTable Whether or not name is for a base table
3675     *
3676     * @return true if found, else false
3677     *
3678     * @exception StandardException Thrown on error
3679     */

3680    public boolean referencesTarget(String name, boolean baseTable)
3681        throws StandardException
3682    {
3683        return baseTable && name.equals(getBaseTableName());
3684    }
3685
3686    /**
3687     * Return true if the node references SESSION schema tables (temporary or permanent)
3688     *
3689     * @return true if references SESSION schema tables, else false
3690     *
3691     * @exception StandardException Thrown on error
3692     */

3693    public boolean referencesSessionSchema()
3694        throws StandardException
3695    {
3696        //If base table is a SESSION schema table, then return true.
3697        return isSessionSchema(tableDescriptor.getSchemaDescriptor());
3698    }
3699
3700
3701    /**
3702     * Return whether or not the underlying ResultSet tree will return
3703     * a single row, at most. This method is intended to be used during
3704     * generation, after the "truly" best conglomerate has been chosen.
3705     * This is important for join nodes where we can save the extra next
3706     * on the right side if we know that it will return at most 1 row.
3707     *
3708     * @return Whether or not the underlying ResultSet tree will return a single row.
3709     * @exception StandardException Thrown on error
3710     */

3711    public boolean isOneRowResultSet() throws StandardException
3712    {
3713        // EXISTS FBT will only return a single row
3714        if (existsBaseTable)
3715        {
3716            return true;
3717        }
3718
3719        /* For hash join, we need to consider both the qualification
3720         * and hash join predicates and we consider them against all
3721         * conglomerates since we are looking for any uniqueness
3722         * condition that holds on the columns in the hash table,
3723         * otherwise we just consider the predicates in the
3724         * restriction list and the conglomerate being scanned.
3725
3726         */

3727        AccessPath ap = getTrulyTheBestAccessPath();
3728        JoinStrategy trulyTheBestJoinStrategy = ap.getJoinStrategy();
3729        PredicateList pl;
3730
3731        if (trulyTheBestJoinStrategy.isHashJoin())
3732        {
3733            pl = (PredicateList) getNodeFactory().getNode(
3734                                            C_NodeTypes.PREDICATE_LIST,
3735                                            getContextManager());
3736            if (storeRestrictionList != null)
3737            {
3738                pl.nondestructiveAppend(storeRestrictionList);
3739            }
3740            if (nonStoreRestrictionList != null)
3741            {
3742                pl.nondestructiveAppend(nonStoreRestrictionList);
3743            }
3744            return isOneRowResultSet(pl);
3745        }
3746        else
3747        {
3748            return isOneRowResultSet(getTrulyTheBestAccessPath().
3749                                        getConglomerateDescriptor(),
3750                                     restrictionList);
3751        }
3752    }
3753
3754    /**
3755     * Return whether or not this is actually an EBT for NOT EXISTS.
3756     */

3757    public boolean isNotExists()
3758    {
3759        return isNotExists;
3760    }
3761
3762    public boolean isOneRowResultSet(OptimizablePredicateList predList) throws StandardException
3763    {
3764        ConglomerateDescriptor[] cds = tableDescriptor.getConglomerateDescriptors();
3765
3766        for (int index = 0; index < cds.length; index++)
3767        {
3768            if (isOneRowResultSet(cds[index], predList))
3769            {
3770                return true;
3771            }
3772        }
3773
3774        return false;
3775    }
3776
3777    /**
3778     * Determine whether or not the columns marked as true in
3779     * the passed in array are a superset of any unique index
3780     * on this table.
3781     * This is useful for subquery flattening and distinct elimination
3782     * based on a uniqueness condition.
3783     *
3784     * @param eqCols The columns to consider
3785     *
3786     * @return Whether or not the columns marked as true are a superset
3787     */

3788    protected boolean supersetOfUniqueIndex(boolean[] eqCols)
3789        throws StandardException
3790    {
3791        ConglomerateDescriptor[] cds = tableDescriptor.getConglomerateDescriptors();
3792
3793        /* Cycle through the ConglomerateDescriptors */
3794        for (int index = 0; index < cds.length; index++)
3795        {
3796            ConglomerateDescriptor cd = cds[index];
3797
3798            if (! cd.isIndex())
3799            {
3800                continue;
3801            }
3802            IndexDescriptor id = cd.getIndexDescriptor();
3803
3804            if (! id.isUnique())
3805            {
3806                continue;
3807            }
3808
3809            int[] keyColumns = id.baseColumnPositions();
3810
3811            int inner = 0;
3812            for ( ; inner < keyColumns.length; inner++)
3813            {
3814                if (! eqCols[keyColumns[inner]])
3815                {
3816                    break;
3817                }
3818            }
3819
3820            /* Did we get a full match? */
3821            if (inner == keyColumns.length)
3822            {
3823                return true;
3824            }
3825        }
3826
3827        return false;
3828    }
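
    /* Illustrative sketch, not part of the original Derby source: the check in
     * supersetOfUniqueIndex() above succeeds as soon as every key column of
     * some unique index is marked true in eqCols, which guarantees at most one
     * matching row. Key positions follow IndexDescriptor.baseColumnPositions().
     */
    private static boolean exampleCoversUniqueKey(boolean[] eqCols, int[] keyColumns)
    {
        for (int inner = 0; inner < keyColumns.length; inner++)
        {
            if (! eqCols[keyColumns[inner]])
            {
                return false;   // some key column has no equality condition
            }
        }
        return true;            // every key column is covered
    }
    // Example: a unique index on columns (2, 3) and
    // eqCols = {false, false, true, true, true} => returns true.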
3829
3830    /**
3831     * Determine whether or not the columns marked as true in
3832     * the passed in join table matrix are a superset of any single column unique index
3833     * on this table.
3834     * This is useful for distinct elimination
3835     * based on a uniqueness condition.
3836     *
3837     * @param tableColMap The columns to consider
3838     *
3839     * @return Whether or not the columns marked as true for at least
3840     * one table are a superset
3841     */

3842    protected boolean supersetOfUniqueIndex(JBitSet[] tableColMap)
3843        throws StandardException
3844    {
3845        ConglomerateDescriptor[] cds = tableDescriptor.getConglomerateDescriptors();
3846
3847        /* Cycle through the ConglomerateDescriptors */
3848        for (int index = 0; index < cds.length; index++)
3849        {
3850            ConglomerateDescriptor cd = cds[index];
3851
3852            if (! cd.isIndex())
3853            {
3854                continue;
3855            }
3856            IndexDescriptor id = cd.getIndexDescriptor();
3857
3858            if (! id.isUnique())
3859            {
3860                continue;
3861            }
3862
3863            int[] keyColumns = id.baseColumnPositions();
3864            int numBits = tableColMap[0].size();
3865            JBitSet keyMap = new JBitSet(numBits);
3866            JBitSet resMap = new JBitSet(numBits);
3867
3868            int inner = 0;
3869            for ( ; inner < keyColumns.length; inner++)
3870            {
3871                keyMap.set(keyColumns[inner]);
3872            }
3873            int table = 0;
3874            for ( ; table < tableColMap.length; table++)
3875            {
3876                resMap.setTo(tableColMap[table]);
3877                resMap.and(keyMap);
3878                if (keyMap.equals(resMap))
3879                {
3880                    tableColMap[table].set(0);
3881                    return true;
3882                }
3883            }
3884
3885        }
3886
3887        return false;
3888    }
3889
3890    /**
3891     * Get the lock mode for the target table heap of an update or delete
3892     * statement. It is not always MODE_RECORD. We want the lock on the
3893     * heap to be consistent with the optimizer's and, eventually, the system's decision.
3894     * This is to avoid deadlock (beetle 4318). During update/delete
3895     * execution, we first use the lock mode returned here to lock the heap and
3896     * open a RowChanger, then use the lock mode that is the optimizer's and
3897     * system's combined decision to open the actual source conglomerate.
3898     * We've got to make sure they are consistent. This is the lock chart (for
3899     * detailed reasoning, see the comments below):
3900     *   BEST ACCESS PATH          LOCK MODE ON HEAP
3901     *   ----------------------    -----------------------------------------
3902     *   index                     row lock
3903     *
3904     *   heap                      row lock if READ_COMMITTED,
3905     *                             REPEATABLE_READ, or READ_UNCOMMITTED and
3906     *                             no table lock was specified; otherwise,
3907     *                             use the optimizer-decided best access
3908     *                             path's lock mode
3909     *
3910     * @return The lock mode
3911     */

3912    public int updateTargetLockMode()
3913    {
3914        /* if best access path is index scan, we always use row lock on heap,
3915         * consistent with IndexRowToBaseRowResultSet's openCore(). We don't
3916         * need to worry about the correctness of serializable isolation level
3917         * because index will have previous key locking if it uses row locking
3918         * as well.
3919         */

3920        if (getTrulyTheBestAccessPath().getConglomerateDescriptor().isIndex())
3921            return TransactionController.MODE_RECORD;
3922
3923        /* we override optimizer's decision of the lock mode on heap, and
3924         * always use row lock if we are read committed/uncommitted or
3925         * repeatable read isolation level, and no forced table lock.
3926         *
3927         * This is also reflected in TableScanResultSet's constructor,
3928         * KEEP THEM CONSISTENT!
3929         *
3930         * This is to improve concurrency, while maintaining correctness with
3931         * serializable level. Since the isolation level can change between
3932         * compilation and execution if the statement is cached or stored, we
3933         * encode both the SERIALIZABLE lock mode and the non-SERIALIZABLE
3934         * lock mode in the returned lock mode if they are different.
3935         */

3936        int isolationLevel =
3937            getLanguageConnectionContext().getCurrentIsolationLevel();
3938
3939
3940        if ((isolationLevel != ExecutionContext.SERIALIZABLE_ISOLATION_LEVEL) &&
3941            (tableDescriptor.getLockGranularity() !=
3942                    TableDescriptor.TABLE_LOCK_GRANULARITY))
3943        {
3944            int lockMode = getTrulyTheBestAccessPath().getLockMode();
3945            if (lockMode != TransactionController.MODE_RECORD)
3946                lockMode = (lockMode & 0xff) << 16;
3947            else
3948                lockMode = 0;
3949            lockMode += TransactionController.MODE_RECORD;
3950
3951            return lockMode;
3952        }
3953
3954        /* if above don't apply, use optimizer's decision on heap's lock
3955         */

3956        return getTrulyTheBestAccessPath().getLockMode();
3957    }
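    /* Editorial note (illustrative, not part of the Derby source): when the
     * optimizer's choice and MODE_RECORD differ, the method above packs two
     * lock modes into one int -- the optimizer's (SERIALIZABLE-time) mode in
     * the high 16 bits and MODE_RECORD in the low 16 bits. The hypothetical
     * helper below sketches how such a combined value could be unpacked again;
     * the name decodeHeapLockMode and its use are assumptions, not Derby API.
     */
    private static int decodeHeapLockMode(int encoded, boolean serializable)
    {
        int serializableMode = encoded >>> 16;   // 0 when both levels use row locking
        int defaultMode      = encoded & 0xffff; // MODE_RECORD
        return (serializable && serializableMode != 0) ? serializableMode : defaultMode;
    }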
3958
3959    /**
3960     * Return whether or not the underlying ResultSet tree
3961     * is ordered on the specified columns.
3962     * RESOLVE - This method currently only considers the outermost table
3963     * of the query block.
3964     * RESOLVE - We do not currently push method calls down, so we don't
3965     * worry about whether the equals comparisons can be against a variant method.
3966     *
3967     * @param crs The specified ColumnReference[]
3968     * @param permuteOrdering Whether or not the order of the CRs in the array can be permuted
3969     * @param fbtVector Vector that is to be filled with the FromBaseTable
3970     *
3971     * @return Whether the underlying ResultSet tree
3972     * is ordered on the specified columns.
3973     *
3974     * @exception StandardException Thrown on error
3975     */

3976    boolean isOrderedOn(ColumnReference[] crs, boolean permuteOrdering, Vector fbtVector)
3977                throws StandardException
3978    {
3979        /* The following conditions must be met, regardless of the value of permuteOrdering,
3980         * in order for the table to be ordered on the specified columns:
3981         * o Each column is from this table. (RESOLVE - handle joins later)
3982         * o The access path for this table is an index.
3983         */

3984        // Verify that all CRs are from this table
3985        for (int index = 0; index < crs.length; index++)
3986        {
3987            if (crs[index].getTableNumber() != tableNumber)
3988            {
3989                return false;
3990            }
3991        }
3992        // Verify access path is an index
3993        ConglomerateDescriptor cd = getTrulyTheBestAccessPath().getConglomerateDescriptor();
3994        if (! cd.isIndex())
3995        {
3996            return false;
3997        }
3998
3999        // Now consider whether or not the CRs can be permuted
4000        boolean isOrdered;
4001        if (permuteOrdering)
4002        {
4003            isOrdered = isOrdered(crs, cd);
4004        }
4005        else
4006        {
4007            isOrdered = isStrictlyOrdered(crs, cd);
4008        }
4009
4010        if (fbtVector != null)
4011        {
4012            fbtVector.addElement(this);
4013        }
4014
4015        return isOrdered;
4016    }
4017
4018    /**
4019     * Turn off bulk fetch
4020     */

4021    void disableBulkFetch()
4022    {
4023        bulkFetchTurnedOff = true;
4024        bulkFetch = UNSET;
4025    }
4026
4027    /**
4028     * Do a special scan for max.
4029     */

4030    void doSpecialMaxScan()
4031    {
4032        if (SanityManager.DEBUG)
4033        {
4034            if ((restrictionList.size() != 0) ||
4035                (storeRestrictionList.size() != 0) ||
4036                (nonStoreRestrictionList.size() != 0))
4037            {
4038                SanityManager.THROWASSERT("shouldn't be setting max special scan because there is a restriction");
4039            }
4040        }
4041        specialMaxScan = true;
4042    }
4043
4044    /**
4045     * Is it possible to do a distinct scan on this ResultSet tree.
4046     * (See SelectNode for the criteria.)
4047     *
4048     * @param distinctColumns the set of distinct columns
4049     * @return Whether or not it is possible to do a distinct scan on this ResultSet tree.
4050     */

4051    boolean isPossibleDistinctScan(Set distinctColumns)
4052    {
4053        if ((restrictionList != null && restrictionList.size() != 0)) {
4054            return false;
4055        }
4056
4057        HashSet columns = new HashSet();
4058        for (int i = 0; i < resultColumns.size(); i++) {
4059            ResultColumn rc = (ResultColumn) resultColumns.elementAt(i);
4060            columns.add(rc.getExpression());
4061        }
4062
4063        return columns.equals(distinctColumns);
4064    }
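    /* Editorial note (illustrative, not part of the Derby source): a distinct
     * scan is only considered when the query has no pushed restrictions and
     * the SELECT DISTINCT columns cover exactly the columns projected from
     * this table -- e.g. "SELECT DISTINCT c1, c2 FROM t" qualifies, while
     * "SELECT DISTINCT c1 FROM t WHERE c2 = 5" does not. The hypothetical
     * sketch below restates that set-equality test over plain column sets.
     */
    private static boolean isPossibleDistinctScanSketch(java.util.Set projectedColumns,
                                                        java.util.Set distinctColumns,
                                                        boolean hasRestrictions)
    {
        // mirrors isPossibleDistinctScan(): any restriction disqualifies the scan
        if (hasRestrictions)
            return false;
        // the projected expressions must match the distinct columns exactly
        return projectedColumns.equals(distinctColumns);
    }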
4065
4066    /**
4067     * Mark the underlying scan as a distinct scan.
4068     */

4069    void markForDistinctScan()
4070    {
4071        distinctScan = true;
4072    }
4073
4074
4075    /**
4076     * Notify the underlying result set tree that the result is
4077     * ordering dependent. (For example, no bulk fetch on an index
4078     * if under an IndexRowToBaseRow.)
4079     */

4080    void markOrderingDependent()
4081    {
4082        /* NOTE: IRTBR will use a different method to tell us that
4083         * it cannot do a bulk fetch as the ordering issues are
4084         * specific to a FBT being under an IRTBR as opposed to a
4085         * FBT being under a PRN, etc.
4086         * So, we just ignore this call for now.
4087         */

4088    }
4089
4090    /**
4091     * Return whether or not this index is ordered on a permutation of the specified columns.
4092     *
4093     * @param crs The specified ColumnReference[]
4094     * @param cd The ConglomerateDescriptor for the chosen index.
4095     *
4096     * @return Whether or not this index is ordered on a permutation of the specified columns.
4097     *
4098     * @exception StandardException Thrown on error
4099     */

4100    private boolean isOrdered(ColumnReference[] crs, ConglomerateDescriptor cd)
4101                        throws StandardException
4102    {
4103        /* This table is ordered on a permutation of the specified columns if:
4104         * o For each key column, until a match has been found for all of the
4105         * ColumnReferences, it is either in the array of ColumnReferences
4106         * or there is an equality predicate on it.
4107         * (NOTE: It is okay to exhaust the key columns before the ColumnReferences
4108         * if the index is unique. In other words if we have CRs left over after
4109         * matching all of the columns in the key then the table is considered ordered
4110         * iff the index is unique. For example:
4111         * i1 on (c1, c2), unique
4112         * select distinct c3 from t1 where c1 = 1 and c2 = ?;
4113         * is ordered on c3 since there will be at most 1 qualifying row.)
4114         */

4115        boolean[] matchedCRs = new boolean[crs.length];
4116
4117        int nextKeyColumn = 0;
4118        int[] keyColumns = cd.getIndexDescriptor().baseColumnPositions();
4119
4120        // Walk through the key columns
4121        for ( ; nextKeyColumn < keyColumns.length; nextKeyColumn++)
4122        {
4123            boolean currMatch = false;
4124            // See if the key column is in crs
4125            for (int nextCR = 0; nextCR < crs.length; nextCR++)
4126            {
4127                if (crs[nextCR].getColumnNumber() == keyColumns[nextKeyColumn])
4128                {
4129                    matchedCRs[nextCR] = true;
4130                    currMatch = true;
4131                    break;
4132                }
4133            }
4134
4135            // Advance to next key column if we found a match on this one
4136            if (currMatch)
4137            {
4138                continue;
4139            }
4140
4141            // Stop search if there is no equality predicate on this key column
4142            if (! storeRestrictionList.hasOptimizableEqualityPredicate(this, keyColumns[nextKeyColumn], true))
4143            {
4144                break;
4145            }
4146        }
4147
4148        /* Count the number of matched CRs. The table is ordered if we matched all of them. */
4149        int numCRsMatched = 0;
4150        for (int nextCR = 0; nextCR < matchedCRs.length; nextCR++)
4151        {
4152            if (matchedCRs[nextCR])
4153            {
4154                numCRsMatched++;
4155            }
4156        }
4157
4158        if (numCRsMatched == matchedCRs.length)
4159        {
4160            return true;
4161        }
4162
4163        /* We didn't match all of the CRs, but if
4164         * we matched all of the key columns then
4165         * we need to check if the index is unique.
4166         */

4167        if (nextKeyColumn == keyColumns.length)
4168        {
4169            if (cd.getIndexDescriptor().isUnique())
4170            {
4171                return true;
4172            }
4173            else
4174            {
4175                return false;
4176            }
4177        }
4178        else
4179        {
4180            return false;
4181        }
4182    }
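    /* Editorial note (illustrative, not part of the Derby source): the loop in
     * isOrdered() accepts a permutation -- each index key column must either
     * appear among the ColumnReferences or be bound by an equality predicate.
     * The hypothetical sketch below reduces that test to plain int arrays;
     * equalityCols stands in for storeRestrictionList's equality-predicate check.
     */
    private static boolean isOrderedSketch(int[] keyColumns, int[] crColumns,
                                           java.util.Set equalityCols, boolean uniqueIndex)
    {
        boolean[] matched = new boolean[crColumns.length];
        int nextKey = 0;
        for ( ; nextKey < keyColumns.length; nextKey++)
        {
            boolean found = false;
            for (int i = 0; i < crColumns.length; i++)
            {
                if (crColumns[i] == keyColumns[nextKey])
                {
                    matched[i] = true;
                    found = true;
                    break;
                }
            }
            if (found)
                continue;
            // stop when a key column is neither referenced nor bound by an equality predicate
            if (! equalityCols.contains(new Integer(keyColumns[nextKey])))
                break;
        }
        int numMatched = 0;
        for (int i = 0; i < matched.length; i++)
        {
            if (matched[i])
                numMatched++;
        }
        if (numMatched == matched.length)
            return true;
        // all key columns consumed but CRs left over: ordered only for a unique index
        return (nextKey == keyColumns.length) && uniqueIndex;
    }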
4183
4184    /**
4185     * Return whether or not this index is ordered exactly on the specified columns.
4186     *
4187     * @param crs The specified ColumnReference[]
4188     * @param cd The ConglomerateDescriptor for the chosen index.
4189     *
4190     * @return Whether or not this index is ordered exactly on the specified columns.
4191     *
4192     * @exception StandardException Thrown on error
4193     */

4194    private boolean isStrictlyOrdered(ColumnReference[] crs, ConglomerateDescriptor cd)
4195                        throws StandardException
4196    {
4197        /* This table is ordered on the specified columns in the specified order if:
4198         * o For each ColumnReference, it is either the next key column or there
4199         * is an equality predicate on all key columns prior to the ColumnReference.
4200         * (NOTE: If the index is unique, then it is okay to have a suffix of
4201         * unmatched ColumnReferences because the set is known to be ordered. For example:
4202         * i1 on (c1, c2), unique
4203         * select distinct c3 from t1 where c1 = 1 and c2 = ?;
4204         * is ordered on c3 since there will be at most 1 qualifying row.)
4205         */

4206        int nextCR = 0;
4207        int nextKeyColumn = 0;
4208        int[] keyColumns = cd.getIndexDescriptor().baseColumnPositions();
4209
4210        // Walk through the CRs
4211        for ( ; nextCR < crs.length; nextCR++)
4212        {
4213            /* If we've walked through all of the key columns then
4214             * we need to check if the index is unique.
4215             * Beetle 4402
4216             */

4217            if (nextKeyColumn == keyColumns.length)
4218            {
4219                if (cd.getIndexDescriptor().isUnique())
4220                {
4221                    break;
4222                }
4223                else
4224                {
4225                    return false;
4226                }
4227            }
4228            if (crs[nextCR].getColumnNumber() == keyColumns[nextKeyColumn])
4229            {
4230                nextKeyColumn++;
4231                continue;
4232            }
4233            else
4234            {
4235                while (crs[nextCR].getColumnNumber() != keyColumns[nextKeyColumn])
4236                {
4237                    // Stop if there is no equality predicate on this key column
4238                    if (! storeRestrictionList.hasOptimizableEqualityPredicate(this, keyColumns[nextKeyColumn], true))
4239                    {
4240                        return false;
4241                    }
4242
4243                    // Advance to the next key column
4244                    nextKeyColumn++;
4245
4246                    /* If we've walked through all of the key columns then
4247                     * we need to check if the index is unique.
4248                     */

4249                    if (nextKeyColumn == keyColumns.length)
4250                    {
4251                        if (cd.getIndexDescriptor().isUnique())
4252                        {
4253                            break;
4254                        }
4255                        else
4256                        {
4257                            return false;
4258                        }
4259                    }
4260                }
4261            }
4262        }
4263        return true;
4264    }
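    /* Editorial note (illustrative, not part of the Derby source): a concrete
     * trace of isStrictlyOrdered() above. With index i1 on (c1, c2) and an
     * ORDER BY on c2 only, c2 is not the leading key column, so the method
     * requires a pushable equality predicate on c1; given "... WHERE c1 = ?"
     * it advances past c1, matches c2, and reports the rows as strictly
     * ordered on c2. Without that predicate it returns false.
     */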
4265
4266    /**
4267     * Is this a one-row result set with the given conglomerate descriptor?
4268     */

4269    private boolean isOneRowResultSet(ConglomerateDescriptor cd,
4270                                    OptimizablePredicateList predList)
4271        throws StandardException
4272    {
4273        if (predList == null)
4274        {
4275            return false;
4276        }
4277
4278        if (SanityManager.DEBUG)
4279        {
4280            if (! (predList instanceof PredicateList))
4281            {
4282                SanityManager.THROWASSERT(
4283                    "predList should be a PredicateList, but is a " +
4284                    predList.getClass().getName()
4285                );
4286            }
4287        }
4288
4289        PredicateList restrictionList = (PredicateList) predList;
4290
4291        if (! cd.isIndex())
4292        {
4293            return false;
4294        }
4295
4296        IndexRowGenerator irg =
4297            cd.getIndexDescriptor();
4298
4299        // is this a unique index
4300        if (! irg.isUnique())
4301        {
4302            return false;
4303        }
4304
4305        int[] baseColumnPositions = irg.baseColumnPositions();
4306
4307        DataDictionary dd = getDataDictionary();
4308
4309        // Do we have an exact match on the full key
4310        for (int index = 0; index < baseColumnPositions.length; index++)
4311        {
4312            // get the column number at this position
4313            int curCol = baseColumnPositions[index];
4314
4315            /* Is there a pushable equality predicate on this key column?
4316             * (IS NULL is also acceptable)
4317             */

4318            if (! restrictionList.hasOptimizableEqualityPredicate(this, curCol, true))
4319            {
4320                return false;
4321            }
4322
4323        }
4324
4325        return true;
4326    }
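    /* Editorial note (illustrative, not part of the Derby source): the test
     * above reports a one-row result set only when the chosen conglomerate is
     * a unique index and every key column carries a pushable equality (or
     * IS NULL) predicate -- e.g. a unique index on (c1, c2) with
     * "WHERE c1 = 5 AND c2 = ?". The hypothetical sketch below restates the
     * check over plain arrays.
     */
    private static boolean isOneRowResultSetSketch(boolean uniqueIndex,
                                                   int[] keyColumns,
                                                   java.util.Set equalityCols)
    {
        if (! uniqueIndex)
            return false;
        for (int i = 0; i < keyColumns.length; i++)
        {
            // every key column must be bound by an equality (or IS NULL) predicate
            if (! equalityCols.contains(new Integer(keyColumns[i])))
                return false;
        }
        return true;
    }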
4327
4328    private int getDefaultBulkFetch()
4329        throws StandardException
4330    {
4331        int valInt;
4332        String valStr = PropertyUtil.getServiceProperty(
4333                          getLanguageConnectionContext().getTransactionCompile(),
4334                          LanguageProperties.BULK_FETCH_PROP,
4335                          LanguageProperties.BULK_FETCH_DEFAULT);
4336                            
4337        valInt = getIntProperty(valStr, LanguageProperties.BULK_FETCH_PROP);
4338
4339        // verify that the specified value is valid
4340        if (valInt <= 0)
4341        {
4342            throw StandardException.newException(SQLState.LANG_INVALID_BULK_FETCH_VALUE,
4343                    String.valueOf(valInt));
4344        }
4345
4346        /*
4347        ** If the value is <= 1, then reset it
4348        ** to UNSET -- this is how customers can
4349        ** override the bulkFetch default to turn
4350        ** it off.
4351        */

4352        return (valInt <= 1) ?
4353            UNSET : valInt;
4354    }
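    /* Editorial note (illustrative, not part of the Derby source): the default
     * bulk fetch size is read from the service property named by
     * LanguageProperties.BULK_FETCH_PROP, falling back to
     * LanguageProperties.BULK_FETCH_DEFAULT, and a configured value of 1 is
     * mapped back to UNSET so bulk fetch is effectively disabled. The
     * hypothetical sketch below shows only that final mapping; unsetValue
     * stands in for this class's UNSET constant.
     */
    private static int mapBulkFetchValueSketch(int valInt, int unsetValue)
    {
        // values below 1 are rejected earlier with LANG_INVALID_BULK_FETCH_VALUE
        return (valInt <= 1) ? unsetValue : valInt;
    }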
4355
4356    private String getUserSpecifiedIndexName()
4357    {
4358        String retval = null;
4359
4360        if (tableProperties != null)
4361        {
4362            retval = tableProperties.getProperty("index");
4363        }
4364
4365        return retval;
4366    }
4367
4368    /*
4369    ** RESOLVE: This whole thing should probably be moved somewhere else,
4370    ** like the optimizer or the data dictionary.
4371    */

4372    private StoreCostController getStoreCostController(
4373                                        ConglomerateDescriptor cd)
4374            throws StandardException
4375    {
4376        return getCompilerContext().getStoreCostController(cd.getConglomerateNumber());
4377    }
4378
4379    private StoreCostController getBaseCostController()
4380            throws StandardException
4381    {
4382        return getStoreCostController(baseConglomerateDescriptor);
4383    }
4384
4385    private boolean gotRowCount = false;
4386    private long rowCount = 0;
4387    private long baseRowCount() throws StandardException
4388    {
4389        if (! gotRowCount)
4390        {
4391            StoreCostController scc = getBaseCostController();
4392            rowCount = scc.getEstimatedRowCount();
4393            gotRowCount = true;
4394        }
4395
4396        return rowCount;
4397    }
4398
4399    private DataValueDescriptor[] getRowTemplate(
4400    ConglomerateDescriptor cd,
4401    StoreCostController scc)
4402            throws StandardException
4403    {
4404        /*
4405        ** If it's for a heap scan, just get all the columns in the
4406        ** table.
4407        */

4408        if (! cd.isIndex())
4409            return templateColumns.buildEmptyRow().getRowArray();
4410
4411        /* It's an index scan, so get all the columns in the index */
4412        ExecRow emptyIndexRow = templateColumns.buildEmptyIndexRow(
4413                                                        tableDescriptor,
4414                                                        cd,
4415                                                        scc,
4416                                                        getDataDictionary());
4417
4418        return emptyIndexRow.getRowArray();
4419    }
4420
4421    private ConglomerateDescriptor getFirstConglom()
4422        throws StandardException
4423    {
4424        getConglomDescs();
4425        return conglomDescs[0];
4426    }
4427
4428    private ConglomerateDescriptor getNextConglom(ConglomerateDescriptor currCD)
4429        throws StandardException
4430    {
4431        int index = 0;
4432
4433        for ( ; index < conglomDescs.length; index++)
4434        {
4435            if (currCD == conglomDescs[index])
4436            {
4437                break;
4438            }
4439        }
4440
4441        if (index < conglomDescs.length - 1)
4442        {
4443            return conglomDescs[index + 1];
4444        }
4445        else
4446        {
4447            return null;
4448        }
4449    }
4450
4451    private void getConglomDescs()
4452        throws StandardException
4453    {
4454        if (conglomDescs == null)
4455        {
4456            conglomDescs = tableDescriptor.getConglomerateDescriptors();
4457        }
4458    }
4459
4460
4461    /**
4462     * Set the information gathered from the parent table that is
4463     * required to perform a referential action on the dependent table.
4464     */

4465    public void setRefActionInfo(long fkIndexConglomId,
4466                                 int[] fkColArray,
4467                                 String parentResultSetId,
4468                                 boolean dependentScan)
4469    {
4470
4471
4472        this.fkIndexConglomId = fkIndexConglomId;
4473        this.fkColArray = fkColArray;
4474        this.raParentResultSetId = parentResultSetId;
4475        this.raDependentScan = dependentScan;
4476    }
4477
4478
4479}
4480