
1 /*
2  * The contents of this file are subject to the terms
3  * of the Common Development and Distribution License
4  * (the "License"). You may not use this file except
5  * in compliance with the License.
6  *
7  * You can obtain a copy of the license at
8  * glassfish/bootstrap/legal/CDDLv1.0.txt or
9  * https://glassfish.dev.java.net/public/CDDLv1.0.html.
10  * See the License for the specific language governing
11  * permissions and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL
14  * HEADER in each file and include the License file at
15  * glassfish/bootstrap/legal/CDDLv1.0.txt. If applicable,
16  * add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your
18  * own identifying information: Portions Copyright [yyyy]
19  * [name of copyright owner]
20  */

21 // Copyright (c) 1998, 2006, Oracle. All rights reserved.
22 package oracle.toplink.essentials.mappings;
23
24 import java.util.*;
25 import oracle.toplink.essentials.exceptions.*;
26 import oracle.toplink.essentials.expressions.*;
27 import oracle.toplink.essentials.internal.descriptors.*;
28 import oracle.toplink.essentials.internal.helper.*;
29 import oracle.toplink.essentials.internal.identitymaps.*;
30 import oracle.toplink.essentials.internal.queryframework.*;
31 import oracle.toplink.essentials.internal.sessions.*;
32 import oracle.toplink.essentials.sessions.DatabaseRecord;
33 import oracle.toplink.essentials.descriptors.DescriptorEvent;
34 import oracle.toplink.essentials.descriptors.DescriptorEventManager;
35 import oracle.toplink.essentials.queryframework.*;
36 import oracle.toplink.essentials.indirection.ValueHolderInterface;
37 import oracle.toplink.essentials.internal.sessions.AbstractRecord;
38 import oracle.toplink.essentials.internal.sessions.UnitOfWorkImpl;
39 import oracle.toplink.essentials.internal.sessions.AbstractSession;
40 import oracle.toplink.essentials.descriptors.ClassDescriptor;
41
42 /**
43  * <p><b>Purpose</b>: The aggregate collection mapping is used to represent the aggregate relationship between a single
44  * source object and a collection of target objects. The target objects cannot exist without the existence of the
45  * source object (privately owned)
46  * Unlike the normal aggregate mapping, there is a target table being mapped from the target objects.
47  * Unlike normal 1:m mapping, there is no 1:1 back reference mapping, as foreign key constraints have been resolved by the aggregation.
48  *
49  * @author King (Yaoping) Wang
50  * @since TOPLink/Java 3.0
51  */

52 public class AggregateCollectionMapping extends CollectionMapping implements RelationalMapping {
53
54     /** This is a key in the target table which is a foreign key referencing the source table. */
55     protected transient Vector targetForeignKeyFields;
56
57     /** This is a primary key in the source table that is used as a foreign key in the target table. */
58     protected transient Vector sourceKeyFields;
59
60     /** Foreign keys in the target table to the related keys in the source table */
61     protected transient Map targetForeignKeyToSourceKeys;
62
63     /**
64      * PUBLIC:
65      * Default constructor.
66      */

67     public AggregateCollectionMapping() {
68         this.targetForeignKeyToSourceKeys = new HashMap(5);
69         this.sourceKeyFields = oracle.toplink.essentials.internal.helper.NonSynchronizedVector.newInstance(1);
70         this.targetForeignKeyFields = oracle.toplink.essentials.internal.helper.NonSynchronizedVector.newInstance(1);
71         this.deleteAllQuery = new DeleteAllQuery();
72         //aggregates should always cascade all operations
73         this.setCascadeAll(true);
74     }
75
76     /**
77      * INTERNAL:
78      */

79     public boolean isRelationalMapping() {
80         return true;
81     }
82
83     /**
84      * PUBLIC:
85      * Define the target foreign key relationship in the 1-M aggregate collection mapping.
86      * Both the target foreign key field name and the source primary key field name must be specified.
87      */

88     public void addTargetForeignKeyFieldName(String targetForeignKey, String sourceKey) {
89         getTargetForeignKeyFields().addElement(new DatabaseField(targetForeignKey));
90         getSourceKeyFields().addElement(new DatabaseField(sourceKey));
91     }
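    // A minimal configuration sketch, assuming a hypothetical Employee class with a collection of
    // PhoneNumber aggregates and hypothetical EMPLOYEE/PHONE tables; the class and column names are
    // illustrative placeholders only:
    //
    //     AggregateCollectionMapping phonesMapping = new AggregateCollectionMapping();
    //     phonesMapping.setAttributeName("phoneNumbers");
    //     phonesMapping.setReferenceClass(PhoneNumber.class);
    //     // the PHONE target table carries a foreign key back to the EMPLOYEE source table
    //     phonesMapping.addTargetForeignKeyFieldName("PHONE.EMP_ID", "EMPLOYEE.EMP_ID");
    //     employeeDescriptor.addMapping(phonesMapping);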
92
93     /**
94      * INTERNAL:
95      * Used during building the backup shallow copy to copy the vector without re-registering the target objects.
96      */

97     public Object buildBackupCloneForPartObject(Object attributeValue, Object clone, Object backup, UnitOfWorkImpl unitOfWork) {
98         ContainerPolicy containerPolicy = getContainerPolicy();
99         if (attributeValue == null) {
100             return containerPolicy.containerInstance(1);
101         }
102
103         Object clonedAttributeValue = containerPolicy.containerInstance(containerPolicy.sizeFor(attributeValue));
104         synchronized (attributeValue) {
105             for (Object valuesIterator = containerPolicy.iteratorFor(attributeValue);
106                      containerPolicy.hasNext(valuesIterator);) {
107                 Object cloneValue = buildElementBackupClone(containerPolicy.next(valuesIterator, unitOfWork), unitOfWork);
108                 containerPolicy.addInto(cloneValue, clonedAttributeValue, unitOfWork);
109             }
110         }
111         return clonedAttributeValue;
112     }
113
114     /**
115      * INTERNAL:
116      * Required for cloning; the part must be cloned.
117      * Ignore the objects, use the attribute value.
118      * This is identical to the super class except that the element must be added to the new
119      * aggregates collection so that the referenced objects will be cloned correctly.
120      */

121     public Object buildCloneForPartObject(Object attributeValue, Object original, Object clone, UnitOfWorkImpl unitOfWork, boolean isExisting) {
122         ContainerPolicy containerPolicy = getContainerPolicy();
123         if (attributeValue == null) {
124             return containerPolicy.containerInstance(1);
125         }
126         Object clonedAttributeValue = containerPolicy.containerInstance(containerPolicy.sizeFor(attributeValue));
127
128         // I need to synchronize here to prevent the collection from changing while I am cloning it.
129         // This will occur when I am merging into the cache and I am instantiating a UOW valueHolder at the same time.
130         // I can not synchronize around the clone, as this will cause deadlocks, so I will need to copy the collection then create the clones.
131         // I will use a temporary collection to help speed up the process.
132         Object temporaryCollection = null;
133         synchronized (attributeValue) {
134             temporaryCollection = containerPolicy.cloneFor(attributeValue);
135         }
136         for (Object valuesIterator = containerPolicy.iteratorFor(temporaryCollection);
137                  containerPolicy.hasNext(valuesIterator);) {
138             Object originalElement = containerPolicy.next(valuesIterator, unitOfWork);
139
140             //need to add to aggregate list in the case that there are related objects.
141             if (unitOfWork.isOriginalNewObject(original)) {
142                 unitOfWork.addNewAggregate(originalElement);
143             }
144             Object cloneValue = buildElementClone(originalElement, unitOfWork, isExisting);
145             containerPolicy.addInto(cloneValue, clonedAttributeValue, unitOfWork);
146         }
147         return clonedAttributeValue;
148     }
149
150     /**
151      * INTERNAL:
152      * Clone the aggregate collection, if necessary.
153      */

154     protected Object buildElementBackupClone(Object element, UnitOfWorkImpl unitOfWork) {
155         // Do not clone for read-only.
156         if (unitOfWork.isClassReadOnly(element.getClass())) {
157             return element;
158         }
159
160         ClassDescriptor aggregateDescriptor = getReferenceDescriptor(element.getClass(), unitOfWork);
161         Object clonedElement = aggregateDescriptor.getObjectBuilder().buildBackupClone(element, unitOfWork);
162
163         return clonedElement;
164     }
165
166     /**
167      * INTERNAL:
168      * Clone the aggregate collection, if necessary.
169      */

170     protected Object buildElementClone(Object element, UnitOfWorkImpl unitOfWork, boolean isExisting) {
171         // Do not clone for read-only.
172         if (unitOfWork.isClassReadOnly(element.getClass())) {
173             return element;
174         }
175
176         ClassDescriptor aggregateDescriptor = getReferenceDescriptor(element.getClass(), unitOfWork);
177
178         // bug 2612602 as we are building the working copy make sure that we call the correct clone method.
179         Object clonedElement = aggregateDescriptor.getObjectBuilder().instantiateWorkingCopyClone(element, unitOfWork);
180         aggregateDescriptor.getObjectBuilder().populateAttributesForClone(element, clonedElement, unitOfWork, null);
181         // CR 4155 add the originals to the UnitOfWork so that we can find them later in the merge,
182         // as aggregates have no identity. If we don't do this we will lose indirection information.
183         unitOfWork.getCloneToOriginals().put(clonedElement, element);
184         return clonedElement;
185     }
186
187     /**
188      * INTERNAL:
189      * Cascade registerNew for Create through mappings that require the cascade
190      */

191     public void cascadeRegisterNewIfRequired(Object object, UnitOfWorkImpl uow, IdentityHashtable visitedObjects){
192         //aggregate objects are not registered but their mappings should be.
193         Object cloneAttribute = null;
194         cloneAttribute = getAttributeValueFromObject(object);
195         if ((cloneAttribute == null) || (!getIndirectionPolicy().objectIsInstantiated(cloneAttribute))) {
196             return;
197         }
198
199         ObjectBuilder builder = null;
200         ContainerPolicy cp = getContainerPolicy();
201         Object cloneObjectCollection = null;
202         cloneObjectCollection = getRealCollectionAttributeValueFromObject(object, uow);
203         Object cloneIter = cp.iteratorFor(cloneObjectCollection);
204         while (cp.hasNext(cloneIter)) {
205             Object nextObject = cp.next(cloneIter, uow);
206             if (nextObject != null && (! visitedObjects.contains(nextObject))){
207                 visitedObjects.put(nextObject, nextObject);
208                 builder = getReferenceDescriptor(nextObject.getClass(), uow).getObjectBuilder();
209                 builder.cascadeRegisterNewForCreate(nextObject, uow, visitedObjects);
210             }
211         }
212     }
213
214     /**
215      * INTERNAL:
216      * Cascade perform removal for Delete through mappings that require the cascade
217      */

218     public void cascadePerformRemoveIfRequired(Object object, UnitOfWorkImpl uow, IdentityHashtable visitedObjects){
219         //aggregate objects are not registered but their mappings should be.
220         Object cloneAttribute = null;
221         cloneAttribute = getAttributeValueFromObject(object);
222         if ((cloneAttribute == null)) {
223             return;
224         }
225
226         ObjectBuilder builder = null;
227         ContainerPolicy cp = getContainerPolicy();
228         Object cloneObjectCollection = null;
229         cloneObjectCollection = getRealCollectionAttributeValueFromObject(object, uow);
230         Object cloneIter = cp.iteratorFor(cloneObjectCollection);
231         while (cp.hasNext(cloneIter)) {
232             Object nextObject = cp.next(cloneIter, uow);
233             if (nextObject != null && ( ! visitedObjects.contains(nextObject) ) ){
234                 visitedObjects.put(nextObject, nextObject);
235                 builder = getReferenceDescriptor(nextObject.getClass(), uow).getObjectBuilder();
236                 builder.cascadePerformRemove(nextObject, uow, visitedObjects);
237             }
238         }
239     }
240
241     /**
242      * INTERNAL:
243      * The mapping clones itself to create deep copy.
244      */

245     public Object clone() {
246         AggregateCollectionMapping mappingObject = (AggregateCollectionMapping)super.clone();
247
248         mappingObject.setTargetForeignKeyToSourceKeys(new HashMap(getTargetForeignKeyToSourceKeys()));
249
250         return mappingObject;
251     }
252
253     /**
254      * INTERNAL:
255      * This method is used to create a change record from comparing two aggregate collections
256      * @return ChangeRecord
257      */

258     public ChangeRecord compareForChange(Object clone, Object backUp, ObjectChangeSet owner, AbstractSession session) {
259         Object cloneAttribute = null;
260         Object backUpAttribute = null;
261
262         cloneAttribute = getAttributeValueFromObject(clone);
263
264         if ((cloneAttribute != null) && (!getIndirectionPolicy().objectIsInstantiated(cloneAttribute))) {
265             //If the clone's valueholder was not triggered then no changes were made.
266             return null;
267         }
268         if (!owner.isNew()) {
269             backUpAttribute = getAttributeValueFromObject(backUp);
270             if ((backUpAttribute == null) && (cloneAttribute == null)) {
271                 return null;
272             }
273             ContainerPolicy cp = getContainerPolicy();
274             Object backupCollection = null;
275             Object cloneCollection = null;
276
277             cloneCollection = getRealCollectionAttributeValueFromObject(clone, session);
278             backupCollection = getRealCollectionAttributeValueFromObject(backUp, session);
279
280             if (cp.sizeFor(backupCollection) != cp.sizeFor(cloneCollection)) {
281                 return convertToChangeRecord(cloneCollection, owner, session);
282             }
283             Object cloneIterator = cp.iteratorFor(cloneCollection);
284             Object backUpIterator = cp.iteratorFor(backupCollection);
285             boolean change = false;
286
287             // For bug 2863721 must use a different UnitOfWorkChangeSet as here just
288             // seeing if changes are needed. If changes are needed then a
289             // real changeSet will be created later.
290             UnitOfWorkChangeSet uowComparisonChangeSet = new UnitOfWorkChangeSet();
291             while (cp.hasNext(cloneIterator)) {
292                 Object cloneObject = cp.next(cloneIterator, session);
293
294                 // For CR#2285 assume that if null is added the collection has changed.
295                 if (cloneObject == null) {
296                     change = true;
297                     break;
298                 }
299                 Object backUpObject = null;
300                 if (cp.hasNext(backUpIterator)) {
301                     backUpObject = cp.next(backUpIterator, session);
302                 } else {
303                     change = true;
304                     break;
305                 }
306                 if (cloneObject.getClass().equals(backUpObject.getClass())) {
307                     ObjectBuilder builder = getReferenceDescriptor(cloneObject.getClass(), session).getObjectBuilder();
308                     ObjectChangeSet initialChanges = builder.createObjectChangeSet(cloneObject, uowComparisonChangeSet, owner.isNew(), session);
309
310                     //compare for changes will return null if no change is detected and I need to remove the changeSet
311                     ObjectChangeSet changes = builder.compareForChange(cloneObject, backUpObject, uowComparisonChangeSet, session);
312                     if (changes != null) {
313                         change = true;
314                         break;
315                     }
316                 } else {
317                     change = true;
318                     break;
319                 }
320             }
321             if ((change == true) || (cp.hasNext(backUpIterator))) {
322                 return convertToChangeRecord(cloneCollection, owner, session);
323             } else {
324                 return null;
325             }
326         }
327
328         return convertToChangeRecord(getRealCollectionAttributeValueFromObject(clone, session), owner, session);
329     }
330
331     /**
332      * INTERNAL:
333      * Compare the attributes belonging to this mapping for the objects.
334      */

335     public boolean compareObjects(Object firstObject, Object secondObject, AbstractSession session) {
336         Object firstCollection = getRealCollectionAttributeValueFromObject(firstObject, session);
337         Object secondCollection = getRealCollectionAttributeValueFromObject(secondObject, session);
338         ContainerPolicy containerPolicy = getContainerPolicy();
339
340         if (containerPolicy.sizeFor(firstCollection) != containerPolicy.sizeFor(secondCollection)) {
341             return false;
342         }
343
344         if (containerPolicy.sizeFor(firstCollection) == 0) {
345             return true;
346         }
347
348         //iterate over the first aggregate collection
349         for (Object iterFirst = containerPolicy.iteratorFor(firstCollection);
350                  containerPolicy.hasNext(iterFirst);) {
351             //fetch the next object from the first iterator.
352             Object firstAggregateObject = containerPolicy.next(iterFirst, session);
353
354             //iterate over the second aggregate collection
355             for (Object iterSecond = containerPolicy.iteratorFor(secondCollection); true;) {
356                 //fetch the next object from the second iterator.
357                 Object secondAggregateObject = containerPolicy.next(iterSecond, session);
358
359                 //matched object found, break to outer FOR loop
360                 if (getReferenceDescriptor().getObjectBuilder().compareObjects(firstAggregateObject, secondAggregateObject, session)) {
361                     break;
362                 }
363
364                 if (!containerPolicy.hasNext(iterSecond)) {
365                     return false;
366                 }
367             }
368         }
369
370         return true;
371     }
372
373     /**
374      * INTERNAL:
375      * This method is used to convert the contents of an aggregateCollection into a
376      * changeRecord
377      * @return oracle.toplink.essentials.internal.sessions.AggregateCollectionChangeRecord the changerecord representing this AggregateCollectionMapping
378      * @param owner oracle.toplink.essentials.internal.sessions.ObjectChangeSet the ChangeSet that uses this record
379      * @param cloneCollection Object the collection to convert
380      * @param session oracle.toplink.essentials.publicinterface.Session
381      */

382     protected ChangeRecord convertToChangeRecord(Object cloneCollection, ObjectChangeSet owner, AbstractSession session) {
383         ContainerPolicy cp = getContainerPolicy();
384         Object cloneIter = cp.iteratorFor(cloneCollection);
385         Vector collectionChanges = new Vector(2);
386         while (cp.hasNext(cloneIter)) {
387             Object aggregateObject = cp.next(cloneIter, session);
388
389             // For CR#2258 quietly ignore nulls inserted into a collection.
390             if (aggregateObject != null) {
391                 ObjectChangeSet changes = getReferenceDescriptor(aggregateObject.getClass(), session).getObjectBuilder().compareForChange(aggregateObject, null, (UnitOfWorkChangeSet)owner.getUOWChangeSet(), session);
392                 collectionChanges.addElement(changes);
393             }
394         }
395
396         //cr 3013 Removed if collection is empty return null block, which prevents recording clear() change
397         AggregateCollectionChangeRecord changeRecord = new AggregateCollectionChangeRecord(owner);
398         changeRecord.setAttribute(getAttributeName());
399         changeRecord.setMapping(this);
400         changeRecord.setChangedValues(collectionChanges);
401         return changeRecord;
402     }
403
404     /**
405      * Delete all the entries matching the selection criteria from the table stored in the
406      * referenced descriptor.
407      */

408     protected void deleteAll(DeleteObjectQuery query) throws DatabaseException {
409         Object referenceObjects = null;
410         if(usesIndirection()) {
411            Object attribute = getAttributeAccessor().getAttributeValueFromObject(query.getObject());
412            if(attribute == null || !((ValueHolderInterface)attribute).isInstantiated()) {
413                // An empty Vector indicates to DeleteAllQuery that no objects should be removed from cache
414                referenceObjects = new Vector(0);
415            }
416         }
417         if(referenceObjects == null) {
418             referenceObjects = this.getRealCollectionAttributeValueFromObject(query.getObject(), query.getSession());
419         }
420         // Ensure that the query is prepared before cloning.
421         ((DeleteAllQuery)getDeleteAllQuery()).executeDeleteAll(query.getSession().getSessionForClass(getReferenceClass()), query.getTranslationRow(), getContainerPolicy().vectorFor(referenceObjects, query.getSession()));
422     }
423
424     /**
425      * INTERNAL:
426      * Execute a descriptor event for the specified event code.
427      */

428     protected void executeEvent(int eventCode, ObjectLevelModifyQuery query) {
429         ClassDescriptor referenceDescriptor = getReferenceDescriptor(query.getObject().getClass(), query.getSession());
430
431         // PERF: Avoid events if no listeners.
432         if (referenceDescriptor.getEventManager().hasAnyEventListeners()) {
433             referenceDescriptor.getEventManager().executeEvent(new DescriptorEvent(eventCode, query));
434         }
435     }
436
437     /**
438      * INTERNAL:
439      * Extract the source primary key value from the target row.
440      * Used for batch reading, mostly following the same order and fields as in the mapping.
441      */

442     protected Vector extractKeyFromTargetRow(AbstractRecord row, AbstractSession session) {
443         Vector key = new Vector(getTargetForeignKeyFields().size());
444
445         for (int index = 0; index < getTargetForeignKeyFields().size(); index++) {
446             DatabaseField targetField = (DatabaseField)getTargetForeignKeyFields().elementAt(index);
447             DatabaseField sourceField = (DatabaseField)getSourceKeyFields().elementAt(index);
448             Object value = row.get(targetField);
449
450             // Must ensure the classification to get a cache hit.
451             try {
452                 value = session.getDatasourcePlatform().getConversionManager().convertObject(value, getDescriptor().getObjectBuilder().getFieldClassification(sourceField));
453             } catch (ConversionException e) {
454                 throw ConversionException.couldNotBeConverted(this, getDescriptor(), e);
455             }
456
457             key.addElement(value);
458         }
459
460         return key;
461     }
462
463     /**
464      * INTERNAL:
465      * Extract the primary key value from the source row.
466      * Used for batch reading, mostly following the same order and fields as in the mapping.
467      */

468     protected Vector extractPrimaryKeyFromRow(AbstractRecord row, AbstractSession session) {
469         Vector key = new Vector(getSourceKeyFields().size());
470
471         for (Enumeration fieldEnum = getSourceKeyFields().elements(); fieldEnum.hasMoreElements();) {
472             DatabaseField field = (DatabaseField)fieldEnum.nextElement();
473             Object value = row.get(field);
474
475             // Must ensure the classification to get a cache hit.
476             try {
477                 value = session.getDatasourcePlatform().getConversionManager().convertObject(value, getDescriptor().getObjectBuilder().getFieldClassification(field));
478             } catch (ConversionException e) {
479                 throw ConversionException.couldNotBeConverted(this, getDescriptor(), e);
480             }
481
482             key.addElement(value);
483         }
484
485         return key;
486     }
487
488     /**
489      * INTERNAL:
490      * Return the aggregate database row with the primary keys from the source table and target table.
491      */

492     public AbstractRecord getAggregateRow(ObjectLevelModifyQuery query, Object object) {
493         Vector referenceObjectKeys = getReferenceObjectKeys(query);
494         AbstractRecord aggregateRow = new DatabaseRecord();
495         Vector keys = getTargetForeignKeyFields();
496         for (int keyIndex = 0; keyIndex < keys.size(); keyIndex++) {
497             aggregateRow.put(keys.elementAt(keyIndex), referenceObjectKeys.elementAt(keyIndex));
498         }
499         getReferenceDescriptor(object.getClass(), query.getSession()).getObjectBuilder().buildRow(aggregateRow, object, query.getSession());
500
501         return aggregateRow;
502     }
503
504     /**
505      * Delete all criteria is created with target foreign keys and source keys.
506      * This criteria is then used to delete target records from the table.
507      */

508     protected Expression getDeleteAllCriteria(AbstractSession session) {
509         Expression expression;
510         Expression criteria = null;
511         Expression builder = new ExpressionBuilder();
512
513         for (Iterator keys = getTargetForeignKeyToSourceKeys().keySet().iterator(); keys.hasNext();) {
514             DatabaseField targetForeignKey = (DatabaseField)keys.next();
515             DatabaseField sourceKey = (DatabaseField)getTargetForeignKeyToSourceKeys().get(targetForeignKey);
516
517             expression = builder.getField(targetForeignKey).equal(builder.getParameter(sourceKey));
518
519             criteria = expression.and(criteria);
520         }
521
522         return criteria;
523     }
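    // A rough illustration, assuming the hypothetical PHONE.EMP_ID -> EMPLOYEE.EMP_ID pairing used
    // earlier: the loop above builds something roughly equivalent to the expression below, with one
    // such equality ANDed in per target-foreign-key/source-key pair:
    //
    //     ExpressionBuilder builder = new ExpressionBuilder();
    //     Expression criteria = builder.getField("PHONE.EMP_ID")
    //             .equal(builder.getParameter("EMPLOYEE.EMP_ID"));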
524
525     /**
526      * INTERNAL:
527      * For inheritance purposes.
528      */

529     public ClassDescriptor getReferenceDescriptor(Class theClass, AbstractSession session) {
530         if (getReferenceDescriptor().getJavaClass().equals(theClass)) {
531             return getReferenceDescriptor();
532         } else {
533             ClassDescriptor subclassDescriptor = session.getDescriptor(theClass);
534             if (subclassDescriptor == null) {
535                 throw DescriptorException.noSubClassMatch(theClass, this);
536             } else {
537                 return subclassDescriptor;
538             }
539         }
540     }
541
542     /**
543      * INTERNAL:
544      * get reference object keys
545      */

546     public Vector getReferenceObjectKeys(ObjectLevelModifyQuery query) throws DatabaseException, OptimisticLockException {
547         Vector referenceObjectKeys = new Vector(getSourceKeyFields().size());
548
549         //For CR#2587-S.M. For nested aggregate collections the source keys can easily be read from the original query.
550         AbstractRecord translationRow = query.getTranslationRow();
551
552         for (Enumeration sourcekeys = getSourceKeyFields().elements();
553                  sourcekeys.hasMoreElements();) {
554             DatabaseField sourceKey = (DatabaseField)sourcekeys.nextElement();
555
556             // CR#2587. Try first to get the source key from the original query. If that fails try to get it from the object.
557             Object referenceKey = null;
558             if ((translationRow != null) && (translationRow.containsKey(sourceKey))) {
559                 referenceKey = translationRow.get(sourceKey);
560             } else {
561                 referenceKey = getDescriptor().getObjectBuilder().extractValueFromObjectForField(query.getObject(), sourceKey, query.getSession());
562             }
563             referenceObjectKeys.addElement(referenceKey);
564         }
565
566         return referenceObjectKeys;
567     }
568
569     /**
570      * PUBLIC:
571      * Return the source key field names associated with the mapping.
572      * These are in-order with the targetForeignKeyFieldNames.
573      */

574     public Vector getSourceKeyFieldNames() {
575         Vector fieldNames = new Vector(getSourceKeyFields().size());
576         for (Enumeration fieldsEnum = getSourceKeyFields().elements();
577                  fieldsEnum.hasMoreElements();) {
578             fieldNames.addElement(((DatabaseField)fieldsEnum.nextElement()).getQualifiedName());
579         }
580
581         return fieldNames;
582     }
583
584     /**
585      * INTERNAL:
586      * Return the source key names associated with the mapping
587      */

588     public Vector getSourceKeyFields() {
589         return sourceKeyFields;
590     }
591
592     /**
593      * PUBLIC:
594      * Return the target foreign key field names associated with the mapping.
595      * These are in-order with the sourceKeyFieldNames.
596      */

597     public Vector getTargetForeignKeyFieldNames() {
598         Vector fieldNames = new Vector(getTargetForeignKeyFields().size());
599         for (Enumeration fieldsEnum = getTargetForeignKeyFields().elements();
600                  fieldsEnum.hasMoreElements();) {
601             fieldNames.addElement(((DatabaseField)fieldsEnum.nextElement()).getQualifiedName());
602         }
603
604         return fieldNames;
605     }
606
607     /**
608      * INTERNAL:
609      * Return the target foreign key fields associated with the mapping.
610      */

611     public Vector getTargetForeignKeyFields() {
612         return targetForeignKeyFields;
613     }
614
615     /**
616      * INTERNAL:
617      */

618     public Map getTargetForeignKeyToSourceKeys() {
619         return targetForeignKeyToSourceKeys;
620     }
621
622     /**
623      * INTERNAL:
624      * For aggregate collection mapping the reference descriptor is cloned. The cloned descriptor is then
625      * assigned primary keys and table names before initialize. Once the cloned descriptor is initialized
626      * it is assigned as the reference descriptor in the aggregate mapping. This is a very specific
627      * behaviour for aggregate mappings. The original descriptor is used only for creating clones and
628      * after that the mapping never uses it.
629      * Some initialization is done in postInitialize to ensure the target descriptor's references are initialized.
630      */

631     public void initialize(AbstractSession session) throws DescriptorException {
632         super.initialize(session);
633
634         if (!getReferenceDescriptor().isAggregateCollectionDescriptor()) {
635             session.getIntegrityChecker().handleError(DescriptorException.referenceDescriptorIsNotAggregateCollection(getReferenceClass().getName(), this));
636         }
637
638         if (shouldInitializeSelectionCriteria()) {
639             if (isSourceKeySpecified()) {
640                 initializeTargetForeignKeyToSourceKeys(session);
641             } else {
642                 initializeTargetForeignKeyToSourceKeysWithDefaults(session);
643             }
644
645             initializeSelectionCriteria(session);
646         }
647
648         // Aggregate 1:m never maintains cache as target objects are aggregates.
649         getSelectionQuery().setShouldMaintainCache(false);
650
651         initializeDeleteAllQuery(session);
652     }
653
654     /**
655      * INTERNAL:
656      * For aggregate mapping the reference descriptor is cloned. Also the involved inheritance descriptor, its children
657      * and parents all need to be cloned. The cloned descriptors are then assigned primary keys and table names before
658      * initialize. Once the cloned descriptor is initialized it is assigned as the reference descriptor in the aggregate mapping.
659      * This is a very specific behaviour for aggregate mappings. The original descriptor is used only for creating clones
660      * and after that the mapping never uses it.
661      * Some initialization is done in postInitialize to ensure the target descriptor's references are initialized.
662      */

663     public void initializeChildInheritance(ClassDescriptor parentDescriptor, AbstractSession session) throws DescriptorException {
664         //recursive call to the further children descriptors
665         if (parentDescriptor.getInheritancePolicy().hasChildren()) {
666             //setFields(clonedChildDescriptor.getFields());
667             Vector childDescriptors = parentDescriptor.getInheritancePolicy().getChildDescriptors();
668             Vector cloneChildDescriptors = oracle.toplink.essentials.internal.helper.NonSynchronizedVector.newInstance();
669             for (Enumeration enumtr = childDescriptors.elements(); enumtr.hasMoreElements();) {
670                 ClassDescriptor clonedChildDescriptor = (ClassDescriptor)((ClassDescriptor)enumtr.nextElement()).clone();
671
672                 if (!clonedChildDescriptor.isAggregateCollectionDescriptor()) {
673                     session.getIntegrityChecker().handleError(DescriptorException.referenceDescriptorIsNotAggregate(clonedChildDescriptor.getJavaClass().getName(), this));
674                 }
675
676                 clonedChildDescriptor.getInheritancePolicy().setParentDescriptor(parentDescriptor);
677                 clonedChildDescriptor.preInitialize(session);
678                 clonedChildDescriptor.initialize(session);
679                 cloneChildDescriptors.addElement(clonedChildDescriptor);
680                 initializeChildInheritance(clonedChildDescriptor, session);
681             }
682             parentDescriptor.getInheritancePolicy().setChildDescriptors(cloneChildDescriptors);
683         }
684     }
685
686     /**
687      * INTERNAL:
688      * Initialize delete all query. This query is used to delete the collection of objects from the
689      * target table.
690      */

691     protected void initializeDeleteAllQuery(AbstractSession session) {
692         DeleteAllQuery query = (DeleteAllQuery)getDeleteAllQuery();
693         query.setReferenceClass(getReferenceClass());
694         query.setShouldMaintainCache(false);
695         if (!hasCustomDeleteAllQuery()) {
696             if (getSelectionCriteria() == null) {
697                 query.setSelectionCriteria(getDeleteAllCriteria(session));
698             } else {
699                 query.setSelectionCriteria(getSelectionCriteria());
700             }
701         }
702     }
703
704     /**
705      * INTERNAL:
706      * For aggregate mapping the reference descriptor is cloned. Also the involved inheritance descriptor, its children
707      * and parents all need to be cloned. The cloned descriptors are then assigned primary keys and table names before
708      * initialize. Once the cloned descriptor is initialized it is assigned as the reference descriptor in the aggregate mapping.
709      * This is a very specific behaviour for aggregate mappings. The original descriptor is used only for creating clones
710      * and after that the mapping never uses it.
711      * Some initialization is done in postInitialize to ensure the target descriptor's references are initialized.
712      */

713     public void initializeParentInheritance(ClassDescriptor parentDescriptor, ClassDescriptor childDescriptor, AbstractSession session) throws DescriptorException {
714         if (!parentDescriptor.isAggregateCollectionDescriptor()) {
715             session.getIntegrityChecker().handleError(DescriptorException.referenceDescriptorIsNotAggregateCollection(parentDescriptor.getJavaClass().getName(), this));
716         }
717
718         ClassDescriptor clonedParentDescriptor = (ClassDescriptor)parentDescriptor.clone();
719
720         //recursive call to the further parent descriptors
721         if (clonedParentDescriptor.getInheritancePolicy().isChildDescriptor()) {
722             ClassDescriptor parentToParentDescriptor = session.getDescriptor(clonedParentDescriptor.getJavaClass());
723             initializeParentInheritance(parentToParentDescriptor, parentDescriptor, session);
724         }
725
726         Vector children = oracle.toplink.essentials.internal.helper.NonSynchronizedVector.newInstance(1);
727         children.addElement(childDescriptor);
728         clonedParentDescriptor.getInheritancePolicy().setChildDescriptors(children);
729         clonedParentDescriptor.preInitialize(session);
730         clonedParentDescriptor.initialize(session);
731     }
732
733     /**
734      * INTERNAL:
735      * Selection criteria is created with target foreign keys and source keys.
736      * This criteria is then used to read records from the target table.
737      */

738     protected void initializeSelectionCriteria(AbstractSession session) {
739         Expression expression;
740         Expression criteria;
741         Expression builder = new ExpressionBuilder();
742
743         for (Iterator keys = getTargetForeignKeyToSourceKeys().keySet().iterator(); keys.hasNext();) {
744             DatabaseField targetForeignKey = (DatabaseField)keys.next();
745             DatabaseField sourceKey = (DatabaseField)getTargetForeignKeyToSourceKeys().get(targetForeignKey);
746
747             expression = builder.getField(targetForeignKey).equal(builder.getParameter(sourceKey));
748
749             criteria = expression.and(getSelectionCriteria());
750             setSelectionCriteria(criteria);
751         }
752     }
753
754     /**
755      * INTERNAL:
756      * The foreign keys and the primary key names are converted to DatabaseFields and stored.
757      */

758     protected void initializeTargetForeignKeyToSourceKeys(AbstractSession session) throws DescriptorException {
759         if (getTargetForeignKeyFields().isEmpty()) {
760             throw DescriptorException.noTargetForeignKeysSpecified(this);
761         }
762
763         for (Enumeration keys = getTargetForeignKeyFields().elements(); keys.hasMoreElements();) {
764             DatabaseField foreignKeyfield = (DatabaseField)keys.nextElement();
765             getReferenceDescriptor().buildField(foreignKeyfield);
766         }
767
768         for (Enumeration keys = getSourceKeyFields().elements(); keys.hasMoreElements();) {
769             DatabaseField sourceKeyfield = (DatabaseField)keys.nextElement();
770             getDescriptor().buildField(sourceKeyfield);
771         }
772
773         if (getTargetForeignKeyFields().size() != getSourceKeyFields().size()) {
774             throw DescriptorException.targetForeignKeysSizeMismatch(this);
775         }
776
777         Enumeration targetForeignKeysEnum = getTargetForeignKeyFields().elements();
778         Enumeration sourceKeysEnum = getSourceKeyFields().elements();
779         for (; targetForeignKeysEnum.hasMoreElements();) {
780             getTargetForeignKeyToSourceKeys().put(targetForeignKeysEnum.nextElement(), sourceKeysEnum.nextElement());
781         }
782     }
783
784     /**
785      * INTERNAL:
786      * The foreign keys and the primary key names are converted to DatabaseFields and stored. The source keys
787      * are not specified by the user so primary keys are extracted from the reference descriptor.
788      */

789     protected void initializeTargetForeignKeyToSourceKeysWithDefaults(AbstractSession session) throws DescriptorException {
790         if (getTargetForeignKeyFields().isEmpty()) {
791             throw DescriptorException.noTargetForeignKeysSpecified(this);
792         }
793
794         List sourceKeys = getDescriptor().getPrimaryKeyFields();
795         setSourceKeyFields(oracle.toplink.essentials.internal.helper.NonSynchronizedVector.newInstance(sourceKeys));
796         for (Enumeration keys = getTargetForeignKeyFields().elements(); keys.hasMoreElements();) {
797             DatabaseField foreignKeyfield = ((DatabaseField)keys.nextElement());
798             getReferenceDescriptor().buildField(foreignKeyfield);
799         }
800
801         if (getTargetForeignKeyFields().size() != sourceKeys.size()) {
802             throw DescriptorException.targetForeignKeysSizeMismatch(this);
803         }
804
805         for (int index = 0; index < getTargetForeignKeyFields().size(); index++) {
806             getTargetForeignKeyToSourceKeys().put(getTargetForeignKeyFields().get(index), sourceKeys.get(index));
807         }
808     }
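    // A sketch of the defaulting behaviour, assuming the same hypothetical PHONE.EMP_ID field: if a
    // caller populates only the target foreign key fields and never specifies source keys, each
    // target foreign key is paired positionally with the owning descriptor's primary key fields:
    //
    //     mapping.getTargetForeignKeyFields().addElement(new DatabaseField("PHONE.EMP_ID"));
    //     // at initialize() time PHONE.EMP_ID is paired with the source descriptor's primary key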
809
810     /**
811      * INTERNAL:
812      * Iterate on the specified element.
813      */

814     public void iterateOnElement(DescriptorIterator iterator, Object element) {
815         // CR#... Aggregate collections must iterate as aggregates, not regular mappings.
816         // For some reason the element can be null; this makes absolutely no sense, but we have a test case for it...
817         if (element != null) {
818             iterator.iterateForAggregateMapping(element, this, iterator.getSession().getDescriptor(element));
819         }
820     }
821
822     /**
823      * INTERNAL:
824      */

825     public boolean isAggregateCollectionMapping() {
826         return true;
827     }
828
829     /**
830      * INTERNAL:
831      */

832     public boolean isPrivateOwned() {
833         return true;
834     }
835
836     /**
837      * Checks if source key is specified or not.
838      */

839     protected boolean isSourceKeySpecified() {
840         return !(getSourceKeyFields().isEmpty());
841     }
842
843     /**
844      * INTERNAL:
845      * Merge changes from the source to the target object.
846      * Because this is a collection mapping, values are added to or removed from the
847      * collection based on the changeset
848      */

849     public void mergeChangesIntoObject(Object target, ChangeRecord changeRecord, Object source, MergeManager mergeManager) {
850         //Check to see if the target has an instantiated collection
851         if (!isAttributeValueInstantiated(target)) {
852             //Then do nothing.
853             return;
854         }
855
856         ContainerPolicy containerPolicy = getContainerPolicy();
857         AbstractSession session = mergeManager.getSession();
858         Object valueOfTarget = null;
859
860         //At this point the source's indirection must be instantiated or the changeSet would never have
861         // been created.
862         Object sourceAggregate = null;
863
864         //On a distributed cache if our changes are for the same version as the target object
865         //then load the changes from database.
866         // CR 4143
867         // CR 4155 Always replace the collection with the query results as we will not be able to
868         // find the originals for merging and indirection information may be lost.
869         if (mergeManager.shouldMergeChangesIntoDistributedCache()) {
870             ClassDescriptor descriptor = getDescriptor();
871             AbstractRecord parentRow = descriptor.getObjectBuilder().extractPrimaryKeyRowFromObject(target, session);
872             Object result = getIndirectionPolicy().valueFromQuery(getSelectionQuery(), parentRow, session);//fix for indirection
873             setAttributeValueInObject(target, result);
874             return;
875         }
876
877         // iterate over the changes and merge the collections
878         Vector aggregateObjects = ((AggregateCollectionChangeRecord)changeRecord).getChangedValues();
879         valueOfTarget = containerPolicy.containerInstance();
880         // Next iterate over the changes and add them to the container
881         ObjectChangeSet objectChanges = null;
882         for (int i = 0; i < aggregateObjects.size(); ++i) {
883             objectChanges = (ObjectChangeSet)aggregateObjects.elementAt(i);
884             Class localClassType = objectChanges.getClassType(session);
885             sourceAggregate = objectChanges.getUnitOfWorkClone();
886
887             // cr 4155 Load the target from the UnitOfWork. This will be the original
888             // aggregate object that has the original indirection in it.
889             Object targetAggregate = ((UnitOfWorkImpl)mergeManager.getSession()).getCloneToOriginals().get(sourceAggregate);
890
891             if (targetAggregate == null) {
892                 targetAggregate = getReferenceDescriptor(localClassType, session).getObjectBuilder().buildNewInstance();
893             }
894             getReferenceDescriptor(localClassType, session).getObjectBuilder().mergeChangesIntoObject(targetAggregate, objectChanges, sourceAggregate, mergeManager);
895             containerPolicy.addInto(targetAggregate, valueOfTarget, session);
896         }
897         setRealAttributeValueInObject(target, valueOfTarget);
898     }
899
900     /**
901      * INTERNAL:
902      * Merge changes from the source to the target object.
903      */

904     public void mergeIntoObject(Object target, boolean isTargetUnInitialized, Object source, MergeManager mergeManager) {
905         if (isTargetUnInitialized) {
906             // This will happen if the target object was removed from the cache before the commit was attempted
907             if (mergeManager.shouldMergeWorkingCopyIntoOriginal() && (!isAttributeValueInstantiated(source))) {
908                 setAttributeValueInObject(target, getIndirectionPolicy().getOriginalIndirectionObject(getAttributeValueFromObject(source), mergeManager.getSession()));
909                 return;
910             }
911         }
912         if (!shouldMergeCascadeReference(mergeManager)) {
913             // This is only going to happen on mergeClone, and we should not attempt to merge the reference
914             return;
915         }
916         if (mergeManager.shouldMergeOriginalIntoWorkingCopy()) {
917             if (!isAttributeValueInstantiated(target)) {
918                 // This will occur when the clone's value has not been instantiated yet and we do not need
919                 // to refresh that attribute.
920                 return;
921             }
922         } else if (!isAttributeValueInstantiated(source)) {
923             // I am merging from a clone into an original. No need to do merge if the attribute was never
924             // modified.
925             return;
926         }
927
928         ContainerPolicy containerPolicy = getContainerPolicy();
929         Object valueOfSource = getRealCollectionAttributeValueFromObject(source, mergeManager.getSession());
930         Object valueOfTarget = containerPolicy.containerInstance(containerPolicy.sizeFor(valueOfSource));
931         for (Object sourceValuesIterator = containerPolicy.iteratorFor(valueOfSource);
932                  containerPolicy.hasNext(sourceValuesIterator);) {
933             Object sourceValue = containerPolicy.next(sourceValuesIterator, mergeManager.getSession());
934
935             //CR#2896 - TW
936             Object originalValue = getReferenceDescriptor(sourceValue.getClass(), mergeManager.getSession()).getObjectBuilder().buildNewInstance();
937             getReferenceDescriptor(sourceValue.getClass(), mergeManager.getSession()).getObjectBuilder().mergeIntoObject(originalValue, true, sourceValue, mergeManager);
938             containerPolicy.addInto(originalValue, valueOfTarget, mergeManager.getSession());
939         }
940
941         // Must re-set variable to allow for set method to re-morph changes if the collection is not being stored directly.
942         setRealAttributeValueInObject(target, valueOfTarget);
943     }
944
945     /**
946      * INTERNAL:
947      * An object was added to the collection during an update, insert it if private.
948      */

949     protected void objectAddedDuringUpdate(ObjectLevelModifyQuery query, Object objectAdded, ObjectChangeSet changeSet) throws DatabaseException, OptimisticLockException {
950         // Insert must not be done for uow or cascaded queries and we must cascade to cascade policy.
951         InsertObjectQuery insertQuery = getAndPrepareModifyQueryForInsert(query, objectAdded);
952         query.getSession().executeQuery(insertQuery, insertQuery.getTranslationRow());
953     }
954
955     /**
956      * INTERNAL:
957      * An object was removed from the collection during an update, delete it if private.
958      */

959     protected void objectRemovedDuringUpdate(ObjectLevelModifyQuery query, Object objectDeleted) throws DatabaseException, OptimisticLockException {
960         // Delete must not be done for uow or cascaded queries and we must cascade to cascade policy.
961         DeleteObjectQuery deleteQuery = new DeleteObjectQuery();
962         prepareModifyQueryForDelete(query, deleteQuery, objectDeleted);
963         query.getSession().executeQuery(deleteQuery, deleteQuery.getTranslationRow());
964     }
965
966     /**
967      * INTERNAL:
968      * An object is still in the collection, update it as it may have changed.
969      */

970     protected void objectUnchangedDuringUpdate(ObjectLevelModifyQuery query, Object object, Hashtable backupCloneKeyedCache, CacheKey cachedKey) throws DatabaseException, OptimisticLockException {
971         // Always write for updates, either private or in uow if calling this method.
972         UpdateObjectQuery updateQuery = new UpdateObjectQuery();
973         Object backupclone = backupCloneKeyedCache.get(cachedKey);
974         updateQuery.setBackupClone(backupclone);
975         prepareModifyQueryForUpdate(query, updateQuery, object);
976         query.getSession().executeQuery(updateQuery, updateQuery.getTranslationRow());
977     }
978
979     /**
980      * INTERNAL:
981      * For aggregate collection mapping the reference descriptor is cloned. The cloned descriptor is then
982      * assigned primary keys and table names before initialize. Once the cloned descriptor is initialized
983      * it is assigned as the reference descriptor in the aggregate mapping. This is a very specific
984      * behaviour for aggregate mappings. The original descriptor is used only for creating clones and
985      * after that the mapping never uses it.
986      * Some initialization is done in postInitialize to ensure the target descriptor's references are initialized.
987      */

988     public void postInitialize(AbstractSession session) throws DescriptorException {
989         super.postInitialize(session);
990         getReferenceDescriptor().postInitialize(session);
991     }
992
993     /**
994      * INTERNAL:
995      * Insert privately owned parts
996      */

997     public void postInsert(WriteObjectQuery query) throws DatabaseException, OptimisticLockException {
998         if (isReadOnly()) {
999             return;
1000        }
1001
1002        Object objects = getRealCollectionAttributeValueFromObject(query.getObject(), query.getSession());
1003
1004        // insert each object one by one
1005        ContainerPolicy cp = getContainerPolicy();
1006        for (Object iter = cp.iteratorFor(objects); cp.hasNext(iter);) {
1007            Object object = cp.next(iter, query.getSession());
1008            InsertObjectQuery insertQuery = getAndPrepareModifyQueryForInsert(query, object);
1009            query.getSession().executeQuery(insertQuery, insertQuery.getTranslationRow());
1010        }
1011    }
1012
1013    /**
1014     * INTERNAL:
1015     * Update the privately owned parts
1016     */

1017    public void postUpdate(WriteObjectQuery writeQuery) throws DatabaseException, OptimisticLockException {
1018        if (isReadOnly()) {
1019            return;
1020        }
1021
1022        // If objects are not instantiated that means they are not changed.
1023        if (!isAttributeValueInstantiated(writeQuery.getObject())) {
1024            return;
1025        }
1026
1027        // Manage objects added and removed from the collection.
1028        Object objects = getRealCollectionAttributeValueFromObject(writeQuery.getObject(), writeQuery.getSession());
1029        Object currentObjectsInDB = readPrivateOwnedForObject(writeQuery);
1030        if (currentObjectsInDB == null) {
1031            currentObjectsInDB = getContainerPolicy().containerInstance(1);
1032        }
1033        compareObjectsAndWrite(currentObjectsInDB, objects, writeQuery);
1034    }
1035
1036    /**
1037     * INTERNAL:
1038     * Delete privately owned parts
1039     */

1040    public void preDelete(DeleteObjectQuery query) throws DatabaseException, OptimisticLockException {
1041        if (isReadOnly()) {
1042            return;
1043        }
1044
1045        // if privately owned parts have their own privately owned parts, delete those one by one;
1046        // else delete everything in one shot.
1047        if (getReferenceDescriptor().hasDependencyOnParts() || getReferenceDescriptor().usesOptimisticLocking() || (getReferenceDescriptor().hasInheritance() && getReferenceDescriptor().getInheritancePolicy().shouldReadSubclasses()) || getReferenceDescriptor().hasMultipleTables()) {
1048            Object objects = getRealCollectionAttributeValueFromObject(query.getObject(), query.getSession());
1049            ContainerPolicy containerPolicy = getContainerPolicy();
1050            for (Object iter = containerPolicy.iteratorFor(objects); containerPolicy.hasNext(iter);) {
1051                Object object = containerPolicy.next(iter, query.getSession());
1052                DeleteObjectQuery deleteQuery = new DeleteObjectQuery();
1053                prepareModifyQueryForDelete(query, deleteQuery, object);
1054                query.getSession().executeQuery(deleteQuery, deleteQuery.getTranslationRow());
1055            }
1056            if (!query.getSession().isUnitOfWork()) {
1057                // This deletes any objects on the database, as the collection in memory may have been changed.
1058                // This is not required for unit of work, as the update would have already deleted these objects,
1059                // and the backup copy will include the same objects causing double deletes.
1060                verifyDeleteForUpdate(query);
1061            }
1062        } else {
1063            deleteAll(query);
1064        }
1065    }
1066
1067    /**
1068     * INTERNAL:
1069     * The message is passed to its reference class descriptor.
1070     */

1071    public void preInsert(WriteObjectQuery query) throws DatabaseException, OptimisticLockException {
1072        if (isReadOnly()) {
1073            return;
1074        }
1075
1076        Object objects = getRealCollectionAttributeValueFromObject(query.getObject(), query.getSession());
1077
1078        // pre-insert each object one by one
1079        ContainerPolicy cp = getContainerPolicy();
1080        for (Object iter = cp.iteratorFor(objects); cp.hasNext(iter);) {
1081            Object object = cp.next(iter, query.getSession());
1082            InsertObjectQuery insertQuery = getAndPrepareModifyQueryForInsert(query, object);
1083
1084            // aggregates do not actually use a query to write to the database so the pre-write must be called here
1085            executeEvent(DescriptorEventManager.PreWriteEvent, insertQuery);
1086            executeEvent(DescriptorEventManager.PreInsertEvent, insertQuery);
1087            getReferenceDescriptor().getQueryManager().preInsert(insertQuery);
1088        }
1089    }
1090
1091    /**
1092     * INTERNAL:
1093     * Returns clone of InsertObjectQuery from the reference descriptor, if it is not set - create it.
1094     */

1095    protected InsertObjectQuery getInsertObjectQuery(AbstractSession session, ClassDescriptor desc) {
1096        InsertObjectQuery insertQuery = desc.getQueryManager().getInsertQuery();
1097        if (insertQuery == null) {
1098            insertQuery = new InsertObjectQuery();
1099            desc.getQueryManager().setInsertQuery(insertQuery);
1100        }
1101        if (insertQuery.getModifyRow() == null) {
1102            AbstractRecord modifyRow = new DatabaseRecord();
1103            for (int i = 0; i < getTargetForeignKeyFields().size(); i++) {
1104                DatabaseField field = (DatabaseField)getTargetForeignKeyFields().elementAt(i);
1105                modifyRow.put(field, null);
1106            }
1107            desc.getObjectBuilder().buildTemplateInsertRow(session, modifyRow);
1108            insertQuery.setModifyRow(modifyRow);
1109        }
1110        return insertQuery;
1111    }
1112
1113    /**
1114     * INTERNAL:
1115     * setup the modifyQuery for post insert/update and pre delete
1116     */

1117    public InsertObjectQuery getAndPrepareModifyQueryForInsert(ObjectLevelModifyQuery originalQuery, Object object) {
1118        AbstractSession session = originalQuery.getSession();
1119        ClassDescriptor objReferenceDescriptor = getReferenceDescriptor(object.getClass(), session);
1120        InsertObjectQuery insertQueryFromDescriptor = getInsertObjectQuery(session, objReferenceDescriptor);
1121        insertQueryFromDescriptor.checkPrepare(session, insertQueryFromDescriptor.getModifyRow());
1122
1123        InsertObjectQuery insertQuery = (InsertObjectQuery)insertQueryFromDescriptor.clone();
1124        insertQuery.setObject(object);
1125
1126        AbstractRecord targetForeignKeyRow = new DatabaseRecord();
1127        Vector referenceObjectKeys = getReferenceObjectKeys(originalQuery);
1128        for (int keyIndex = 0; keyIndex < getTargetForeignKeyFields().size(); keyIndex++) {
1129            targetForeignKeyRow.put(getTargetForeignKeyFields().elementAt(keyIndex), referenceObjectKeys.elementAt(keyIndex));
1130        }
1131
1132        insertQuery.setModifyRow(targetForeignKeyRow);
1133        insertQuery.setTranslationRow(targetForeignKeyRow);
1134        insertQuery.setSession(session);
1135        insertQuery.setCascadePolicy(originalQuery.getCascadePolicy());
1136        insertQuery.dontMaintainCache();
1137
1138        // For bug 2863721, a backup clone must be set for compatibility with the
1139        // old event mechanism, even though for AggregateCollections there is no
1140        // way to get a backup directly from a clone.
1141        if (session.isUnitOfWork()) {
1142            Object backupAttributeValue = getReferenceDescriptor(object.getClass(), session).getObjectBuilder().buildNewInstance();
1143            insertQuery.setBackupClone(backupAttributeValue);
1144        }
1145        return insertQuery;
1146    }
1147
1148    /**
1149     * INTERNAL:
1150     * Set up the modify query for pre-delete.
1151     */

1152    public void prepareModifyQueryForDelete(ObjectLevelModifyQuery originalQuery, ObjectLevelModifyQuery modifyQuery, Object object) {
1153        AbstractRecord aggregateRow = getAggregateRow(originalQuery, object);
1154        modifyQuery.setObject(object);
1155        modifyQuery.setPrimaryKey(getReferenceDescriptor().getObjectBuilder().extractPrimaryKeyFromRow(aggregateRow, originalQuery.getSession()));
1156        modifyQuery.setModifyRow(aggregateRow);
1157        modifyQuery.setTranslationRow(aggregateRow);
1158        modifyQuery.setSession(originalQuery.getSession());
1159        if (originalQuery.shouldCascadeOnlyDependentParts()) {
1160            // This query is the result of being in a UnitOfWork, therefore use the AggregateCollection-
1161            // specific cascade policy to prevent cascading the delete now.
1162            modifyQuery.setCascadePolicy(DatabaseQuery.CascadeAggregateDelete);
1163        } else {
1164            modifyQuery.setCascadePolicy(originalQuery.getCascadePolicy());
1165        }
1166        modifyQuery.dontMaintainCache();
1167    }
1168
1169    /**
1170     * INTERNAL:
1171     * Set up the modify query for update.
1172     */

1173    public void prepareModifyQueryForUpdate(ObjectLevelModifyQuery originalQuery, ObjectLevelModifyQuery modifyQuery, Object object) {
1174        AbstractRecord aggregateRow = getAggregateRow(originalQuery, object);
1175        modifyQuery.setObject(object);
1176        modifyQuery.setPrimaryKey(getReferenceDescriptor().getObjectBuilder().extractPrimaryKeyFromRow(aggregateRow, originalQuery.getSession()));
1177        modifyQuery.setTranslationRow(aggregateRow);
1178        modifyQuery.setSession(originalQuery.getSession());
1179        modifyQuery.setCascadePolicy(originalQuery.getCascadePolicy());
1180        modifyQuery.dontMaintainCache();
1181    }
1182
1183    /**
1184     * PUBLIC:
1185     * Set the source key field names associated with the mapping.
1186     * These must be in-order with the targetForeignKeyFieldNames.
1187     */

1188    public void setSourceKeyFieldNames(Vector fieldNames) {
1189        Vector fields = oracle.toplink.essentials.internal.helper.NonSynchronizedVector.newInstance(fieldNames.size());
1190        for (Enumeration fieldNamesEnum = fieldNames.elements(); fieldNamesEnum.hasMoreElements();) {
1191            fields.addElement(new DatabaseField((String)fieldNamesEnum.nextElement()));
1192        }
1193
1194        setSourceKeyFields(fields);
1195    }
1196
1197    /**
1198     * INTERNAL:
1199     * Set the source key fields associated with this mapping.
1200     */

1201    public void setSourceKeyFields(Vector sourceKeyFields) {
1202        this.sourceKeyFields = sourceKeyFields;
1203    }
1204
1205    /**
1206     * PUBLIC:
1207     * Set the target foreign key field names associated with the mapping.
1208     * These must be in-order with the sourceKeyFieldNames.
1209     */

1210    public void setTargetForeignKeyFieldNames(Vector fieldNames) {
1211        Vector fields = oracle.toplink.essentials.internal.helper.NonSynchronizedVector.newInstance(fieldNames.size());
1212        for (Enumeration fieldNamesEnum = fieldNames.elements(); fieldNamesEnum.hasMoreElements();) {
1213            fields.addElement(new DatabaseField((String)fieldNamesEnum.nextElement()));
1214        }
1215
1216        setTargetForeignKeyFields(fields);
1217    }
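    /*
     * Usage sketch for the two public setters above. The source key names and
     * target foreign key names are positional pairs, so element i of one vector
     * must correspond to element i of the other. The composite key columns used
     * here (AGENT.FIRST_NAME/LAST_NAME and HOUSE.AGENT_FIRST/AGENT_LAST) and the
     * "mapping" variable are hypothetical, chosen only to illustrate the pairing:
     *
     *     AggregateCollectionMapping mapping = new AggregateCollectionMapping();
     *     Vector sourceKeyNames = new Vector();
     *     sourceKeyNames.addElement("AGENT.FIRST_NAME");
     *     sourceKeyNames.addElement("AGENT.LAST_NAME");
     *     Vector targetForeignKeyNames = new Vector();
     *     targetForeignKeyNames.addElement("HOUSE.AGENT_FIRST");
     *     targetForeignKeyNames.addElement("HOUSE.AGENT_LAST");
     *     mapping.setSourceKeyFieldNames(sourceKeyNames);
     *     mapping.setTargetForeignKeyFieldNames(targetForeignKeyNames);
     */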
1218
1219    /**
1220     * INTERNAL:
1221     * Set the target foreign key fields associated with the mapping.
1222     */

1223    public void setTargetForeignKeyFields(Vector targetForeignKeyFields) {
1224        this.targetForeignKeyFields = targetForeignKeyFields;
1225    }
1226
1227    protected void setTargetForeignKeyToSourceKeys(Map targetForeignKeyToSourceKeys) {
1228        this.targetForeignKeyToSourceKeys = targetForeignKeyToSourceKeys;
1229    }
1230
1231    /**
1232     * Returns true, as any process leading to object modification should also affect its privately owned parts.
1233     * Usually used by write, insert, update and delete.
1234     */

1235    protected boolean shouldObjectModifyCascadeToParts(ObjectLevelModifyQuery query) {
1236        if (isReadOnly()) {
1237            return false;
1238        }
1239
1240        return true;
1241    }
1242
1243    /**
1244     * ADVANCED:
1245     * This method is used to have an object added to a collection once the changeSet is applied.
1246     * The referenceKey parameter should only be used for direct Maps. PLEASE ENSURE that the changes
1247     * have been made in the object model first.
1248     */

1249    public void simpleAddToCollectionChangeRecord(Object referenceKey, Object changeSetToAdd, ObjectChangeSet changeSet, AbstractSession session) {
1250        AggregateCollectionChangeRecord collectionChangeRecord = (AggregateCollectionChangeRecord)changeSet.getChangesForAttributeNamed(this.getAttributeName());
1251        if (collectionChangeRecord == null) {
1252            // If there is no change record for this attribute, create one. There is no need to modify the resulting
1253            // change record, as it should be built from the clone, which already has the changes.
1254            Object cloneObject = ((UnitOfWorkChangeSet)changeSet.getUOWChangeSet()).getUOWCloneForObjectChangeSet(changeSet);
1255            Object cloneCollection = this.getRealAttributeValueFromObject(cloneObject, session);
1256            collectionChangeRecord = (AggregateCollectionChangeRecord)convertToChangeRecord(cloneCollection, changeSet, session);
1257            changeSet.addChange(collectionChangeRecord);
1258        } else {
1259            collectionChangeRecord.getChangedValues().add(changeSetToAdd);
1260        }
1261    }
1262
1263    /**
1264     * ADVANCED:
1265     * This method is used to have an object removed from a collection once the changeSet is applied.
1266     * The referenceKey parameter should only be used for direct Maps. PLEASE ENSURE that the changes
1267     * have been made in the object model first.
1268     */

1269    public void simpleRemoveFromCollectionChangeRecord(Object referenceKey, Object changeSetToRemove, ObjectChangeSet changeSet, AbstractSession session) {
1270        AggregateCollectionChangeRecord collectionChangeRecord = (AggregateCollectionChangeRecord)changeSet.getChangesForAttributeNamed(this.getAttributeName());
1271
1272        if (collectionChangeRecord == null) {
1273            // If there is no change record for this attribute, create one. There is no need to modify the resulting
1274            // change record, as it should be built from the clone, which already has the changes.
1275            Object cloneObject = ((UnitOfWorkChangeSet)changeSet.getUOWChangeSet()).getUOWCloneForObjectChangeSet(changeSet);
1276            Object cloneCollection = this.getRealAttributeValueFromObject(cloneObject, session);
1277            collectionChangeRecord = (AggregateCollectionChangeRecord)convertToChangeRecord(cloneCollection, changeSet, session);
1278            changeSet.addChange(collectionChangeRecord);
1279        } else {
1280            collectionChangeRecord.getChangedValues().remove(changeSetToRemove);
1281        }
1282    }
1283
1284    /**
1285     * INTERNAL:
1286     * Retrieves a value from the row for a particular query key
1287     */

1288    protected Object valueFromRowInternal(AbstractRecord row, JoinedAttributeManager joinManager, AbstractSession executionSession) throws DatabaseException {
1289        // For CR#2587: a fix to allow the reading of nested aggregate collections that
1290        // use foreign keys as primary keys.
1291        // Even though foreign keys are not read in a read query, insert them into the row that
1292        // is returned from the database to allow cascading of primary keys.
1293        // This row will eventually become the translation row which is used to read the aggregate collection.
1294        // The fix works by passing foreign key information between source and target queries via the translation row.
1295        // Must clone the row first, because due to prior optimizations the vector of fields is now part of
1296        // a prepared query!
1297        row = (AbstractRecord)row.clone();
1298        int i = 0;
1299        for (Enumeration sourceKeys = getSourceKeyFields().elements();
1300                 sourceKeys.hasMoreElements(); i++) {
1301            DatabaseField sourceKey = (DatabaseField)sourceKeys.nextElement();
1302            Object value = null;
1303
1304            // First ensure that the source foreign key field is in the row.
1305            // N.B. If get() is used and returns null, it may just mean that the field exists but the value is null.
1306            int index = row.getFields().indexOf(sourceKey);
1307            if (index == -1) {
1308                // Line x: Retrieve the value from the source query's translation row.
1309                value = joinManager.getBaseQuery().getTranslationRow().get(sourceKey);
1310                row.add(sourceKey, value);
1311            } else {
1312                value = row.getValues().elementAt(index);
1313            }
1314
1315            // Now duplicate the source key field values under the target key fields, so that child aggregate collections can later access them.
1316            // This will enable the later execution of the above line x.
1317            row.add((DatabaseField)getTargetForeignKeyFields().elementAt(i), value);
1318        }
1319        return super.valueFromRowInternal(row, joinManager, executionSession);
1320    }
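    /*
     * Worked illustration of the row handling above, using a hypothetical
     * EMPLOYEE source key EMP_ID and target foreign key RESPONS.EMP_ID (the
     * table and column names are assumptions for the example only):
     *
     *     AbstractRecord row = new DatabaseRecord();
     *     row.add(new DatabaseField("EMPLOYEE.EMP_ID"), new Integer(5));
     *     // After the clone-and-augment step, the same value is also stored
     *     // under the paired target foreign key field, roughly:
     *     //     {EMPLOYEE.EMP_ID => 5, RESPONS.EMP_ID => 5}
     *     // so that a nested aggregate collection can pick it up from what
     *     // later becomes its translation row.
     */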
1321
1322    /**
1323     * INTERNAL:
1324     * Checks whether the object has been deleted from the database.
1325     */

1326    public boolean verifyDelete(Object object, AbstractSession session) throws DatabaseException {
1327        // Row is built for translation.
1328        if (isReadOnly()) {
1329            return true;
1330        }
1331
1332        AbstractRecord row = getDescriptor().getObjectBuilder().buildRowForTranslation(object, session);
1333        Object value = session.executeQuery(getSelectionQuery(), row);
1334
1335        return getContainerPolicy().isEmpty(value);
1336    }
1337
1338    /**
1339     * Verifying deletes makes sure that all the records privately owned by this mapping are
1340     * actually removed. If such records are found, they are all read and removed one
1341     * by one, taking their privately owned parts into account.
1342     */

1343    protected void verifyDeleteForUpdate(DeleteObjectQuery query) throws DatabaseException, OptimisticLockException {
1344        Object objects = readPrivateOwnedForObject(query);
1345
1346        // Delete all these objects one by one.
1347        ContainerPolicy cp = getContainerPolicy();
1348        for (Object iter = cp.iteratorFor(objects); cp.hasNext(iter);) {
1349            query.getSession().deleteObject(cp.next(iter, query.getSession()));
1350        }
1351    }
1352
1353    /**
1354     * INTERNAL:
1355     * Add a new value and its change set to the collection change record. This is used by
1356     * attribute change tracking. Currently it is not supported in AggregateCollectionMapping.
1357     */

1358    public void addToCollectionChangeRecord(Object newKey, Object newValue, ObjectChangeSet objectChangeSet, UnitOfWorkImpl uow) throws DescriptorException {
1359        throw DescriptorException.invalidMappingOperation(this, "addToCollectionChangeRecord");
1360    }
1361    
1362    /**
1363     * INTERNAL:
1364     * Return true if this mapping supports cascaded version optimistic locking.
1365     */

1366    public boolean isCascadedLockingSupported() {
1367        return true;
1368    }
1369    
1370    /**
1371     * INTERNAL:
1372     * Return if this mapping supports change tracking.
1373     */

1374    public boolean isChangeTrackingSupported() {
1375        return false;
1376    }
1377
1378    /**
1379     * INTERNAL:
1380     * Remove a value and its change set from the collection change record. This is used by
1381     * attribute change tracking. Currently it is not supported in AggregateCollectionMapping.
1382     */

1383    public void removeFromCollectionChangeRecord(Object newKey, Object newValue, ObjectChangeSet objectChangeSet, UnitOfWorkImpl uow) throws DescriptorException {
1384        throw DescriptorException.invalidMappingOperation(this, "removeFromCollectionChangeRecord");
1385    }
1386 }
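/*
 * End-to-end configuration sketch: how an AggregateCollectionMapping is
 * typically wired into a ClassDescriptor, for example in a descriptor
 * customization method. The Employee/Responsibility model, the
 * "responsibilities" attribute and the column names are hypothetical;
 * setAttributeName and setReferenceClass are inherited mapping calls and
 * addMapping is the standard ClassDescriptor registration call.
 *
 *     AggregateCollectionMapping responsibilities = new AggregateCollectionMapping();
 *     responsibilities.setAttributeName("responsibilities");
 *     responsibilities.setReferenceClass(Responsibility.class);
 *     Vector sourceKeyNames = new Vector();
 *     sourceKeyNames.addElement("EMPLOYEE.EMP_ID");
 *     responsibilities.setSourceKeyFieldNames(sourceKeyNames);
 *     Vector targetForeignKeyNames = new Vector();
 *     targetForeignKeyNames.addElement("RESPONS.EMP_ID");
 *     responsibilities.setTargetForeignKeyFieldNames(targetForeignKeyNames);
 *     employeeDescriptor.addMapping(responsibilities);
 */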
1387