KickJava   Java API By Example, From Geeks To Geeks.

Java > Open Source Codes > org > apache > derby > impl > sql > execute > DeleteCascadeResultSet


1 /*
2
3    Derby - Class org.apache.derby.impl.sql.execute.DeleteCascadeResultSet
4
5    Licensed to the Apache Software Foundation (ASF) under one or more
6    contributor license agreements. See the NOTICE file distributed with
7    this work for additional information regarding copyright ownership.
8    The ASF licenses this file to you under the Apache License, Version 2.0
9    (the "License"); you may not use this file except in compliance with
10    the License. You may obtain a copy of the License at
11
12       http://www.apache.org/licenses/LICENSE-2.0
13
14    Unless required by applicable law or agreed to in writing, software
15    distributed under the License is distributed on an "AS IS" BASIS,
16    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17    See the License for the specific language governing permissions and
18    limitations under the License.
19
20  */

21
package org.apache.derby.impl.sql.execute;

import org.apache.derby.iapi.services.sanity.SanityManager;
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.execute.ConstantAction;
import org.apache.derby.iapi.sql.execute.CursorResultSet;
import org.apache.derby.iapi.sql.execute.RowChanger;
import org.apache.derby.iapi.sql.execute.NoPutResultSet;
import org.apache.derby.iapi.sql.Activation;
import org.apache.derby.iapi.sql.ResultDescription;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.sql.ResultSet;
import org.apache.derby.iapi.store.access.ConglomerateController;
import org.apache.derby.iapi.store.access.TransactionController;
import org.apache.derby.iapi.sql.execute.ExecRow;
import org.apache.derby.iapi.sql.execute.TemporaryRowHolder;

import org.apache.derby.iapi.reference.SQLState;

import java.util.Vector;
import java.util.Hashtable;
import java.util.Enumeration;
/**
 * Deletes the rows from the specified base table and executes delete/update
 * actions on dependent tables, depending on the referential actions specified.
 * Note (beetle 5197): a dependent result set of a DeleteCascadeResultSet can be
 * any one of the multiple result sets generated for the same table, because of
 * multiple foreign key relationships to the same table. At bind time,
 * dependents are bound only once per table.
 * We can not depend on the mainNodeTable flag to fire actions on dependents;
 * it should be done based on whether the result set has dependent result sets or not.
 */

56 public class DeleteCascadeResultSet extends DeleteResultSet
57 {
58
59
60     public ResultSet[] dependentResultSets;
61     private int noDependents =0;
62     private CursorResultSet parentSource;
63     private FKInfo parentFKInfo;
64     private long fkIndexConglomNumber;
65     private String JavaDoc resultSetId;
66     private boolean mainNodeForTable = true;
67     private boolean affectedRows = false;
68     private int tempRowHolderId; //this result sets temporary row holder id
69

70     /*
71      * class interface
72      * @exception StandardException Thrown on error
73      */

74     public DeleteCascadeResultSet
75     (
76         NoPutResultSet source,
77         Activation activation,
78         int constantActionItem,
79         ResultSet[] dependentResultSets,
80         String JavaDoc resultSetId
81     )
82         throws StandardException
83     {
84
85         super(source,
86               ((constantActionItem == -1) ?activation.getConstantAction() :
87               (ConstantAction)activation.getPreparedStatement().getSavedObject(constantActionItem)),
88               activation);
89
90         ConstantAction passedInConstantAction;
91         if(constantActionItem == -1)
92             passedInConstantAction = activation.getConstantAction(); //root table
93
else
94         {
95             passedInConstantAction =
96                 (ConstantAction) activation.getPreparedStatement().getSavedObject(constantActionItem);
97             resultDescription = constants.resultDescription;
98         }
99         cascadeDelete = true;
100         this.resultSetId = resultSetId;
101         
102         if(dependentResultSets != null)
103         {
104             noDependents = dependentResultSets.length;
105             this.dependentResultSets = dependentResultSets;
106         }
107
108     }
109
110
111
112     /**
113         @exception StandardException Standard Cloudscape error policy
114     */

115     public void open() throws StandardException
116     {
117
118
119         try{
120             setup();
121             if(isMultipleDeletePathsExist())
122             {
123                 setRowHoldersTypeToUniqueStream();
124                 //collect until there are no more rows to found
125
while(collectAffectedRows(false));
126             }else
127             {
128                 collectAffectedRows(false);
129             }
130             if (! affectedRows)
131             {
132                 activation.addWarning(
133                             StandardException.newWarning(
134                                 SQLState.LANG_NO_ROW_FOUND));
135             }
136
137             runFkChecker(true); //check for only RESTRICT referential action rule violations
138
Hashtable JavaDoc mntHashTable = new Hashtable JavaDoc(); //Hash Table to identify mutiple node for same table cases.
139
mergeRowHolders(mntHashTable);
140             fireBeforeTriggers(mntHashTable);
141             deleteDeferredRows();
142             runFkChecker(false); //check for all constraint violations
143
rowChangerFinish();
144             fireAfterTriggers();
145             cleanUp();
146         }finally
147         {
148             //clear the parent result sets hash table
149
activation.clearParentResultSets();
150         }
151
152         endTime = getCurrentTimeMillis();
153
154     }
155     
156
157     /**
158      *Gathers the rows that needs to be deleted/updated
159      *and creates a temporary resulsets that will be passed
160      *as source to its dependent result sets.
161      */

162     void setup() throws StandardException
163     {
164
165         /* Cache query plan text for source, before it gets blown away */
166         if (lcc.getRunTimeStatisticsMode())
167         {
168             /* savedSource nulled after run time statistics generation */
169             savedSource = source;
170         }
171
172         super.setup();
173         activation.setParentResultSet(rowHolder, resultSetId);
174         Vector JavaDoc sVector = (Vector JavaDoc) activation.getParentResultSet(resultSetId);
175         tempRowHolderId = sVector.size() -1;
176         for(int i =0 ; i < noDependents; i++)
177         {
178             if(dependentResultSets[i] instanceof UpdateResultSet)
179             {
180                 ((UpdateResultSet) dependentResultSets[i]).setup();
181             }else
182             {
183                 ((DeleteCascadeResultSet) dependentResultSets[i]).setup();
184             }
185         }
186
187     }
188
189
190     boolean collectAffectedRows(boolean rowsFound) throws StandardException
191     {
192         if(super.collectAffectedRows())
193         {
194             affectedRows = true;
195             rowsFound = true;
196         }
197
198         for(int i =0 ; i < noDependents; i++)
199         {
200             if(dependentResultSets[i] instanceof UpdateResultSet)
201             {
202                 if(((UpdateResultSet)dependentResultSets[i]).collectAffectedRows())
203                     rowsFound = true;
204             }else
205             {
206                 if(((DeleteCascadeResultSet)
207                     dependentResultSets[i]).collectAffectedRows(rowsFound))
208                     rowsFound = true;
209             }
210         }
211
212         return rowsFound;
213     }
214
215
216     void fireBeforeTriggers(Hashtable JavaDoc msht) throws StandardException
217     {
218         if(!mainNodeForTable)
219         {
220             /*to handle case where no table node had qualified rows, in which case no node for
221              * the table get marked as mainNodeFor table , one way to identify
222              * such case is to look at the mutinode hash table and see if the result id exist ,
223              *if it does not means none of the table nodes resulsets got marked
224              * as main node for table. If that is the case we mark this
225              * resultset as mainNodeTable and put entry in the hash table.
226              */

227             if(!msht.containsKey(resultSetId))
228             {
229                 mainNodeForTable = true;
230                 msht.put(resultSetId, resultSetId);
231             }
232         }
233         
234         //execute the before triggers on the dependents
235
//Defect 5743: Before enabling BEFORE triggers, check DB2 behavior.
236
for(int i =0 ; i < noDependents; i++)
237         {
238             if(dependentResultSets[i] instanceof UpdateResultSet)
239             {
240                 ((UpdateResultSet) dependentResultSets[i]).fireBeforeTriggers();
241             }
242             else{
243                 ((DeleteCascadeResultSet)dependentResultSets[i]).fireBeforeTriggers(msht);
244             }
245         }
246
247         //If there is more than one node for the same table
248
//only one node fires the triggers
249
if(mainNodeForTable && constants.deferred)
250             super.fireBeforeTriggers();
251     }
252
253     void fireAfterTriggers() throws StandardException
254     {
255         //fire the After Triggers on the dependent tables, if any rows changed
256
for(int i=0 ; i<noDependents && affectedRows; i++){
257             if(dependentResultSets[i] instanceof UpdateResultSet)
258             {
259                 ((UpdateResultSet) dependentResultSets[i]).fireAfterTriggers();
260             }
261             else{
262
263                 ((DeleteCascadeResultSet)dependentResultSets[i]).fireAfterTriggers();
264             }
265         }
266
267         //If there is more than one node for the same table
268
//, we let only one node fire the triggers.
269
if(mainNodeForTable && constants.deferred)
270             super.fireAfterTriggers();
271     }
272
273     void deleteDeferredRows() throws StandardException
274     {
275         
276         //delete the rows in the dependents tables
277
for(int i =0 ; i < noDependents; i++)
278         {
279             if(dependentResultSets[i] instanceof UpdateResultSet)
280             {
281                 ((UpdateResultSet) dependentResultSets[i]).updateDeferredRows();
282             }
283             else{
284                 ((DeleteCascadeResultSet)dependentResultSets[i]).deleteDeferredRows();
285             }
286         }
287
288             
289         //If there is more than one node for the same table
290
//only one node deletes all the rows.
291
if(mainNodeForTable)
292             super.deleteDeferredRows();
293     }
294
295     
296     void runFkChecker(boolean restrictCheckOnly) throws StandardException
297     {
298
299         //run the Foreign key or primary key Checker on the dependent tables
300
for(int i =0 ; i < noDependents; i++)
301         {
302             if(dependentResultSets[i] instanceof UpdateResultSet)
303             {
304                 ((UpdateResultSet) dependentResultSets[i]).runChecker(restrictCheckOnly);
305             }
306             else{
307                 ((DeleteCascadeResultSet)dependentResultSets[i]).runFkChecker(restrictCheckOnly);
308             }
309         }
310
311         //If there is more than one node for the same table
312
//only one node does all foreign key checks.
313
if(mainNodeForTable)
314             super.runFkChecker(restrictCheckOnly);
315     }
316
317
318     public void cleanUp() throws StandardException
319     {
320
321         super.cleanUp();
322         for(int i =0 ; i < noDependents; i++)
323         {
324             if(dependentResultSets[i] instanceof UpdateResultSet)
325             {
326                 ((UpdateResultSet) dependentResultSets[i]).cleanUp();
327             }else
328             {
329                 ((DeleteCascadeResultSet) dependentResultSets[i]).cleanUp();
330             }
331         }
332         
333         endTime = getCurrentTimeMillis();
334     }
335
336
337     private void rowChangerFinish() throws StandardException
338     {
339
340         rc.finish();
341         for(int i =0 ; i < noDependents; i++)
342         {
343             if(dependentResultSets[i] instanceof UpdateResultSet)
344             {
345                 ((UpdateResultSet) dependentResultSets[i]).rowChangerFinish();
346             }else
347             {
348                 ((DeleteCascadeResultSet) dependentResultSets[i]).rowChangerFinish();
349             }
350         }
351     }
352
353
354
355     //if there is more than one node for the same table, copy the rows
356
// into one node , so that we don't fire trigger more than once.
357
private void mergeRowHolders(Hashtable JavaDoc msht) throws StandardException
358     {
359         if(msht.containsKey(resultSetId) || rowCount ==0)
360         {
361             //there is already another resultset node that is marked as main
362
//node for this table or this resultset has no rows qualified.
363
//when none of the resultset nodes for the table has any rows then
364
//we mark them as one them as main node in fireBeforeTriggers().
365
mainNodeForTable = false;
366         }else
367         {
368             mergeResultSets();
369             mainNodeForTable = true;
370             msht.put(resultSetId, resultSetId);
371         }
372         
373         for(int i =0 ; i < noDependents; i++)
374         {
375             if(dependentResultSets[i] instanceof UpdateResultSet)
376             {
377                 return;
378             }
379             else{
380                 ((DeleteCascadeResultSet)dependentResultSets[i]).mergeRowHolders(msht);
381             }
382         }
383     }
384
385
386
387     private void mergeResultSets() throws StandardException
388     {
389         Vector JavaDoc sVector = (Vector JavaDoc) activation.getParentResultSet(resultSetId);
390         int size = sVector.size();
391         // if there is more than one source, we need to merge them into onc
392
// temporary result set.
393
if(size > 1)
394         {
395             ExecRow row = null;
396             int rowHolderId = 0 ;
397             //copy all the vallues in the result set to the current resultset row holder
398
while(rowHolderId < size)
399             {
400                 if(rowHolderId == tempRowHolderId )
401                 {
402                     //skipping the row holder that we are copying the rows into.
403
rowHolderId++;
404                     continue;
405                 }
406                 TemporaryRowHolder currentRowHolder = (TemporaryRowHolder)sVector.elementAt(rowHolderId);
407                 CursorResultSet rs = currentRowHolder.getResultSet();
408                 rs.open();
409                 while ((row = rs.getNextRow()) != null)
410                 {
411                     rowHolder.insert(row);
412                 }
413                 rs.close();
414                 rowHolderId++;
415             }
416             
417         }
418     }
419
420
421     public void finish() throws StandardException {
422         super.finish();
423         
424         //clear the parent result sets hash table
425
//This is necessary in case if we hit any error conditions
426
activation.clearParentResultSets();
427     }
428
429
430     /* check whether we have mutiple path delete scenario, if
431     ** find any retun true. Multiple delete paths exist if we find more than
432     ** one parent source resultset for a table involved in the delete cascade
433     **/

434     private boolean isMultipleDeletePathsExist()
435     {
436         Hashtable JavaDoc parentResultSets = activation.getParentResultSets();
437         for (Enumeration JavaDoc e = parentResultSets.keys() ; e.hasMoreElements() ;)
438         {
439             String JavaDoc rsId = (String JavaDoc) e.nextElement();
440             Vector JavaDoc sVector = (Vector JavaDoc) activation.getParentResultSet(rsId);
441             int size = sVector.size();
442             if(size > 1)
443             {
444                 return true;
445             }
446         }
447         return false;
448     }
449
450     /*
451     **Incases where we have multiple paths we could get the same
452     **rows to be deleted mutiple time and also in case of cycles
453     **there might be new rows getting added to the row holders through
454     **multiple iterations. To handle these case we set the temporary row holders
455     ** to be 'uniqStream' type.
456     **/

457     private void setRowHoldersTypeToUniqueStream()
458     {
459         Hashtable JavaDoc parentResultSets = activation.getParentResultSets();
460         for (Enumeration JavaDoc e = parentResultSets.keys() ; e.hasMoreElements() ;)
461         {
462             String JavaDoc rsId = (String JavaDoc) e.nextElement();
463             Vector JavaDoc sVector = (Vector JavaDoc) activation.getParentResultSet(rsId);
464             int size = sVector.size();
465             int rowHolderId = 0 ;
466             while(rowHolderId < size)
467             {
468                 TemporaryRowHolder currentRowHolder = (TemporaryRowHolder)sVector.elementAt(rowHolderId);
469                 currentRowHolder.setRowHolderTypeToUniqueStream();
470                 rowHolderId++;
471             }
472         }
473     }
474
475 }
476
477
478
479
480
481
482
483
484
485
486
487
488
Popular Tags