// KickJava — Java API By Example, From Geeks To Geeks.
// Java > Open Source Codes > com > daffodilwoods > daffodildb > server > datasystem > persistentsystem > PersistentTable

package com.daffodilwoods.daffodildb.server.datasystem.persistentsystem;

import java.util.*;

import com.daffodilwoods.daffodildb.server.datadictionarysystem.*;
import com.daffodilwoods.daffodildb.server.datasystem.btree.*;
import com.daffodilwoods.daffodildb.server.datasystem.interfaces.*;
import com.daffodilwoods.daffodildb.server.datasystem.persistentsystem.versioninfo.*;
import com.daffodilwoods.daffodildb.utils.*;
import com.daffodilwoods.daffodildb.utils.byteconverter.*;
import com.daffodilwoods.database.general.*;
import com.daffodilwoods.database.resource.*;
/**
 * PersistentTable manages all read and write operations on the clusters of a
 * single table with the help of a record cluster; it supplies the appropriate
 * cluster to the record cluster for each operation.
 */
19 public class PersistentTable implements DatabaseConstants, _TableList {
20
21    /**
22     * To Get Cluster For Read and Write
23     */

24
25    private PersistentDatabase persistentDatabase;
26
27    /**
28     * Name of Table
29     */

30
31    private QualifiedIdentifier tableName;
32
33    /**
34     * Characteristics of last Cluster in Table
35     */

36
37    private ClusterCharacteristics lastClusterCharacteristics;
38
39    /**
40     * Characteristics of first Cluster in Table
41     */

42
43    private ClusterCharacteristics firstClusterCharacteristics;
44
45    /**
46     * Map to maintain free cluster list.
47     */

48    public HashMap freeClusterMap;
49
50    /**
51       * Contains the properties of a particular Database
52       */

53
54    DatabaseProperties databaseProperties;
55    /**
56     * Version Handler to use database costants.
57     */

58    VersionHandler versionHandler;
59
60
61
62    public PersistentTable(QualifiedIdentifier name, PersistentDatabase pd,
63                           ClusterCharacteristics firstCC,
64                           ClusterCharacteristics lastCC) throws DException {
65       tableName = name;
66       persistentDatabase = pd;
67       databaseProperties = pd.getDatabaseProperties();
68       firstClusterCharacteristics = firstCC;
69       freeClusterMap = new HashMap();
70       versionHandler = persistentDatabase.getVersionHandler();
71       initializeLastCC();
72    }
73
74    /**
75     * Inserts record in Last Cluster,in case of failure gets next Cluster and Inserts Record in next cluster
76     * and updates lastClusterCharacteristics of table , If Record can not be inserted in one cluster, then
77     * inserts record partialy.
78     *
79     * @param user To perform write operations on cluster
80     * @param recordCluster _RecordCluster To peform opertaions on cluster
81     * @param columnValues record value which has to insert
82     *
83     * @return TableKey of record at which Record is inserted
84     */

85
86    public Object JavaDoc insert(_DatabaseUser user, _RecordCluster recordCluster,
87                         Object JavaDoc columnValues) throws DException {
88       Cluster cluster = getLastCluster(user);
89       recordCluster.setCluster(cluster);
90       int beforeInsert_RecordCount = recordCluster.getRecordCount();
91       int startColumnIndex = 0;
92       short recordNumber = -1;
93       short exactRecordNumber = -1;
94       Cluster exactCluster = null;
95       while (true) {
96          int[] result = recordCluster.insert( (byte[]) columnValues, startColumnIndex, false);
97          if (result[0] == _RecordCluster.SUCCESSFUL) {
98             Cluster cls = recordCluster.getCluster();
99             recordNumber = recordNumber == -1 ? recordCluster.getRecordCount() : 1;
100             lastClusterCharacteristics = cls.clusterCharacteristics;
101             persistentDatabase.updateWriteClusters(cls);
102             break;
103          }
104          if (startColumnIndex == 0 && result[0] == _RecordCluster.PARTIAL) {
105             exactRecordNumber = recordNumber == -1 ?
106                 (short) (beforeInsert_RecordCount + 1) : recordNumber;
107             exactCluster = recordCluster.getCluster();
108          }
109          Cluster tempCluster1 = persistentDatabase.getNewCluster(user, tableName);
110          removeClusterFromMap(tempCluster1.getClusterAddress());
111          recordCluster.getCluster().setNextCluster(tempCluster1);
112          persistentDatabase.updateWriteClusters(recordCluster.getCluster());
113          persistentDatabase.getCluster(user, firstClusterCharacteristics).
114              setLastCluster(tempCluster1.getClusterCharacteristics());
115          recordCluster.setCluster(tempCluster1);
116          beforeInsert_RecordCount = recordCluster.getRecordCount();
117          if (result[0] == _RecordCluster.PARTIAL)
118             startColumnIndex = result[1];
119       }
120       exactRecordNumber = startColumnIndex == 0 ? recordNumber :
121           exactRecordNumber;
122       exactCluster = startColumnIndex == 0 ? recordCluster.getCluster() :
123           exactCluster;
124       recordCluster.setCluster(null);
125       return new TableKey(exactCluster.getClusterAddress(), exactRecordNumber);
126    }
127
128    /**
129     * deletes record from Cluster at the given key
130     * If Record is partial then it gets DException having DSECode 2006 then it deletes record from all clusters
131     * in which it was inserted.
132     * If Actual Record Exists at Different TableKey then it gets DException having DSECode 2005 and Deletes
133     * Record from that tableKey
134     * @param user To Perform write Operation on Cluster
135     * @param recordCluster To Delete Record From Cluster
136     * @param key TableKey of record which has to delete
137     *
138     * @return key TableKey of record which has to delete
139     *
140     * @throws DException if Record is already deleted
141     */

142    public Object JavaDoc delete(_DatabaseUser user, _RecordCluster recordCluster,
143                         Object JavaDoc key) throws DException {
144       return delete(user, recordCluster, key, true, false);
145    }
146
147    private Object JavaDoc delete(_DatabaseUser user, _RecordCluster recordCluster,
148                          Object JavaDoc key, boolean checkKeyValidity, boolean isUpdate) throws DException {
149       TableKey tkey = (TableKey) key;
150       if (checkClusterInMap(tkey.getStartAddress()))
151         throw StaticExceptions.RECORD_DELETED_EXCEPTION ;
152       Cluster cluster = getClusterForWrite(user,
153                                            new ClusterCharacteristics(tkey.getStartAddress(), false)); //(Cluster)tkey.cluster.get();
154
recordCluster.setCluster(cluster);
155       short recordNumber = tkey.getRecordNumber();
156       ClusterStatus status = null;
157       try {
158          status = recordCluster.delete(recordNumber, checkKeyValidity);
159       } catch (DException de) {
160          if (de.getDseCode().equalsIgnoreCase("DSE2005")) {
161             TableKey tk = (TableKey) de.getParameters()[0];
162             delete(user, recordCluster, tk, false, true);
163          } else if (de.getDseCode().equalsIgnoreCase("DSE2006")) {
164             while ( (status = recordCluster.partialDelete(recordNumber)).
165                    currentRecordStatus == false) {
166                Cluster next = getNextCluster(recordCluster.getCluster(), user);
167                persistentDatabase.updateWriteClusters(recordCluster.getCluster());
168                recordCluster.setCluster(next);
169                recordNumber = 1;
170             }
171             persistentDatabase.updateWriteClusters(recordCluster.getCluster());
172          } else
173             throw de;
174       }
175       if (status != null && status.activeRecordCount == 0 &&
176           !tableName.equals(SystemTables.CLUSTERINFO) && !isUpdate) {
177          updateDeletedClusterInformation(user, status, recordCluster);
178       } else
179          persistentDatabase.updateWriteClusters(cluster);
180       return key;
181    }
182
183
184    /**
185     * Updates record at given key.If record can not be updated in that Cluster then inserts in New
186     * Cluster and puts new tableKey of Record at OldKey in Cluster
187     * It gets DException having DSEcode DSE2006 if Record was partially inserted,then it calculates
188     * oldRecordSize from All Clusters and their free space if record can not be updated then deletes that
189     * record and inserts in new Cluster and puts new tableKey of Record at OldKey in Cluster. If actual
190     * record is at different Key then it gets DException having DSE2005 , then it updates Record at actual
191     * key
192     *
193     * @param user To write Uncommited Bytes of cluster in database file
194     * @param recordCluster To perform write operations on cluster
195     * @param key key of record which has to update
196     * @param values new record values
197     *
198     * @return key key of Record which has to update
199     *
200     * @throws DException If record is deleted at given key
201     */

202
203    public Object JavaDoc update(_DatabaseUser user, _RecordCluster recordCluster,
204                         Object JavaDoc key, Object JavaDoc values) throws DException {
205       byte[] bytes = (byte[]) values;
206       TableKey tkey = (TableKey) key;
207       if (checkClusterInMap(tkey.getStartAddress()))
208         throw StaticExceptions.RECORD_DELETED_EXCEPTION ;
209       short recordNumber = tkey.getRecordNumber();
210       Cluster cluster = getClusterForWrite(user,
211                                            new ClusterCharacteristics(tkey.getStartAddress(), false)); //(Cluster)tkey.cluster.get();
212
recordCluster.setCluster(cluster);
213       try {
214          recordCluster.update(recordNumber, bytes);
215       } catch (DException de) {
216          if (de.getDseCode().equalsIgnoreCase("DSE2003")) // Record Is Deleted
217
throw de;
218          if (de.getDseCode().equalsIgnoreCase("DSE2006")) { // Partialy written
219
if (recordCluster instanceof PartialFixedRecordCluster) {
220                updatePartial(user, recordCluster, key, bytes, false);
221                return key;
222             }
223             Object JavaDoc[] parameters = de.getParameters(); // length of oldRecord and free space in that cluster
224
int length = parameters[0].hashCode();
225             int space = parameters[1].hashCode();
226             boolean isComplete = false;
227             short rec = 1;
228             do {
229                Cluster next = getNextCluster(recordCluster.getCluster(), null);
230                recordCluster.setCluster(next);
231                length += recordCluster.getLength0fPartialRecord(rec) - versionHandler.ACTIVE_DELETE -
232                    versionHandler.FULL_PARTIAL; // partialy written record.
233
space += recordCluster.getRange();
234                isComplete = recordCluster.isComplete(rec); // get this boolean from recordCluster
235
} while (!isComplete);
236             if (length + space >= bytes.length + versionHandler.ACTIVE_DELETE + versionHandler.FULL_PARTIAL) { // + ACTIVE_DELETE + FULL_PARTIAL because these two bytes are not added in new bytes
237
updatePartial(user, recordCluster, key, bytes, true);
238                return key;
239             }
240          }
241          if (de.getDseCode().equalsIgnoreCase("DSE2005")) { // Record Is Updated
242
TableKey tk = (TableKey) de.getParameters()[0];
243             update(user, recordCluster, tk, bytes);
244             return key;
245          }
246          delete(user, recordCluster, tkey, true, true);
247          TableKey tk = (TableKey) insertOld(user, recordCluster, bytes);
248          byte[] bb = getNewAddress(tk.getRecordNumber(), tk.getStartAddress());
249          recordCluster.setCluster(cluster);
250          ( (VariableRecordCluster) recordCluster).updateNewAddress(recordNumber,
251              bb);
252       }
253       persistentDatabase.updateWriteClusters(recordCluster.getCluster());
254       recordCluster.setCluster(null);
255       return key;
256    }
257
258    /**
259     * Return bytes of TableKey
260     */

261
262    private byte[] getNewAddress(short recordNumber, int add) {
263       byte[] bytes = new byte[6];
264       System.arraycopy(CCzufDpowfsufs.getBytes(add), 0, bytes, 0,
265                        versionHandler.NEWADDRESSLENGTH);
266       System.arraycopy(CCzufDpowfsufs.getBytes(recordNumber), 0, bytes,
267                        versionHandler.NEWADDRESSLENGTH,
268                        versionHandler.LENGTH);
269       return bytes;
270    }
271
272    /**
273     * Updates Partial Record In cluster.
274     * we get cluster of record number and update it untill all new bytes
275     * stored in place of old record if adjust other cluster flag is true
276     * than we adjust freed cluster during update operation.
277     */

278
279    private void updatePartial(_DatabaseUser user, _RecordCluster recordCluster,
280                               Object JavaDoc key, byte[] bytes, boolean adjustClusters) throws
281        DException {
282       TableKey tkey = (TableKey) key;
283       if (checkClusterInMap(tkey.getStartAddress()))
284         throw StaticExceptions.RECORD_DELETED_EXCEPTION ;
285       Cluster cluster = getClusterForWrite(user,
286                                            new ClusterCharacteristics(tkey.getStartAddress(), false)); //(Cluster)tkey.cluster.get();
287
recordCluster.setCluster(cluster);
288       int startPosition = 0;
289       short recordNumber = tkey.getRecordNumber();
290       while (true) {
291          startPosition = recordCluster.partialUpdate(recordNumber, startPosition, bytes);
292          if (startPosition == bytes.length) {
293             if (adjustClusters)
294                adjustOtherClusters(user, recordCluster, (short) 1, cluster);
295             persistentDatabase.updateWriteClusters(cluster);
296             break;
297          }
298          Cluster nextCluster = getNextCluster(recordCluster.getCluster(), user);
299          persistentDatabase.updateWriteClusters(recordCluster.getCluster());
300          recordCluster.setCluster(nextCluster);
301          recordNumber = 1;
302       }
303    }
304
305    public void setTable(_TableList tableOperations) {
306       throw new RuntimeException JavaDoc("Method Not Supported");
307    }
308
309    public _TableList getTable() {
310       throw new RuntimeException JavaDoc("Method Not Supported");
311    }
312
313    public _TableList getTable(int i) {
314       return this;
315    }
316
317    public String JavaDoc toString() {
318       return "" + tableName + " " + getClass() + "@" + hashCode();
319    }
320
321    public int getColumnCount() throws DException {
322       throw new java.lang.UnsupportedOperationException JavaDoc("method not supported ");
323    }
324    /**
325     * To add bytes array passed into single array.
326     * @param bb1 byte[]
327     * @param bb2 byte[]
328     * @return byte[]
329     */

330    private byte[] append(byte[] bb1, byte[] bb2) {
331       byte[] appendBytes = new byte[bb1.length + bb2.length];
332       System.arraycopy(bb1, 0, appendBytes, 0, bb1.length);
333       System.arraycopy(bb2, 0, appendBytes, bb1.length, bb2.length);
334       return appendBytes;
335    }
336
337    /**
338     * Checks validity of key ,If key is valid then it does nothing just returns
339     * Throws Exception If Record is deletd or key is InValid
340     *
341     * @param recordCluster To Perform read operation on Cluster
342     * @param key key whose validity has to check
343     *
344     * @throws DException if key is inValid or Record at this key is deleted
345     */

346
347    public void checkValidity(_RecordCluster recordCluster, Object JavaDoc key) throws
348        DException {
349       TableKey tableKey = (TableKey) key;
350       Cluster cluster = (Cluster) tableKey.cluster.get();
351       if (cluster == null)
352          cluster = getClusterForRead(new ClusterCharacteristics(tableKey.
353              getStartAddress(), false));
354       int recordNumber = tableKey.getRecordNumber();
355       if (recordNumber > cluster.actualRecordCount || recordNumber < 1) {
356         throw StaticExceptions.INVALID_TABLE_KEY;
357       }
358       if(cluster.activeRecordCount != cluster.actualRecordCount ){
359         recordCluster.setCluster(cluster);
360         recordCluster.checkValidity(tableKey.getRecordNumber());
361       }
362    }
363
364    /**
365     * Returns new Instance of Cluster Iterartor
366     * @return ClusterIterator
367     */

368
369    public ClusterIterator getClusterIterator() throws DException {
370       return new ClusterIterator(this);
371    }
372
373    public _TableCharacteristics getTableCharacteristics() throws DException {
374       /**@todo: Implement this com.daffodilwoods.daffodildb.server.datasystem.persistentsystem._TableOperations method*/
375       throw new java.lang.UnsupportedOperationException JavaDoc(
376           "Method getTableCharacteristics() not yet implemented.");
377    }
378
379    public TableProperties getTableProperties() throws DException {
380       throw new java.lang.UnsupportedOperationException JavaDoc(" method not define yet ");
381    }
382
383    /**
384     * To Remove Cluster From Table If all Records of this Cluster are deleted
385     * but we doesn't remove last and first cluster of a table and also those
386     * whose first record is partial.
387     * if last record is partial in passed status than we get its next and check it for
388     * active records and to be last.
389     */

390
391    private void updateDeletedClusterInformation(_DatabaseUser user,
392                                                 ClusterStatus status,
393                                                 _RecordCluster recordCluster) throws
394        DException {
395       Cluster cluster = recordCluster.getCluster();
396       if (cluster.clusterCharacteristics.equals(lastClusterCharacteristics)) {
397       } else if (cluster.clusterCharacteristics.equals(firstClusterCharacteristics)) {
398       } else {
399          if (status.lastRecordPartial) {
400             Cluster nextCluster = getNextCluster(cluster, user);
401             int startAddress;
402             Cluster nex;
403             while (nextCluster != null && nextCluster.activeRecordCount == 0) {
404                if (nextCluster.clusterCharacteristics.equals(
405                    lastClusterCharacteristics)) {
406                   break;
407                }
408                nex = nextCluster;
409                nextCluster = getNextCluster(nextCluster, user);
410                startAddress = nex.clusterCharacteristics.getStartAddress();
411                persistentDatabase.addFreeCluster(user, startAddress);
412                freeClusterMap.put(new Integer JavaDoc(startAddress), "");
413                cluster.setNextCluster(nextCluster);
414             }
415          }
416          if (!status.firstRecordPartial) {
417             getPreviousCluster(cluster,
418                                user).setNextCluster(getNextCluster(cluster, user));
419             persistentDatabase.addFreeCluster(user,
420                                               cluster.clusterCharacteristics.getStartAddress());
421             freeClusterMap.put(new Integer JavaDoc(cluster.clusterCharacteristics.
422                                            getStartAddress()), "");
423          }
424       }
425    }
426    /**
427     *
428     * @param user _DatabaseUser
429     * @param recordCluster _RecordCluster
430     * @param recordNumber short
431     * @param currentCluster Cluster
432     * @throws DException
433     */

434    private void adjustOtherClusters(_DatabaseUser user,
435                                     _RecordCluster recordCluster,
436                                     short recordNumber, Cluster currentCluster) throws
437        DException {
438       Cluster cluster = recordCluster.getCluster();
439       byte[] clusterBytes = cluster.getBytes();
440       ClusterCharacteristics nextCC = cluster.getNextClusterCharacteristics();
441       if (nextCC == null) {
442          return;
443       }
444       short startPointer = cluster.getStartPointerOfRecord(recordNumber);
445       if (clusterBytes[startPointer + 1] == versionHandler.COMPLETE &&
446           cluster.getActiveRecordCount() > 0)
447          return;
448       currentCluster = recordCluster.getCluster();
449       cluster = persistentDatabase.getClusterForWrite(user, nextCC);
450       boolean isAnyClusterAddedIntoFreeList = false;
451       while (cluster.getActiveRecordCount() <= 0) {
452          nextCC = cluster.getNextClusterCharacteristics();
453          if (nextCC == null)
454             break;
455          persistentDatabase.addFreeCluster(user, cluster.getClusterAddress());
456          freeClusterMap.put(new Integer JavaDoc(cluster.getClusterAddress()), "");
457          isAnyClusterAddedIntoFreeList = true;
458          cluster = persistentDatabase.getClusterForWrite(user, nextCC);
459       }
460       if (isAnyClusterAddedIntoFreeList)
461          currentCluster.setNextCluster(cluster);
462
463    }
464
465
466    /**
467     * returns First Cluster of Table
468     * @return first Cluster
469     */

470    Cluster getFirstCluster() throws DException {
471       return getClusterForRead(firstClusterCharacteristics);
472    }
473
474    /**
475     * returns Last Cluster of Table
476     * @return Last Cluster
477     */

478
479    Cluster getLastCluster(_DatabaseUser user) throws DException {
480       return user == null ? getClusterForRead(lastClusterCharacteristics)
481           : getClusterForWrite(user, lastClusterCharacteristics);
482    }
483
484    /**
485     * returns next Cluster of given cluster
486     * @param cluster Cluster whose next cluster is required
487     * @return next Cluster
488     */

489
490    Cluster getNextCluster(Cluster cluster, _DatabaseUser user) throws DException {
491       ClusterCharacteristics next = cluster.getNextClusterCharacteristics();
492       return next == null ? null : user == null ? getClusterForRead(next)
493           : getClusterForWrite(user, next);
494    }
495
496    /**
497     * returns previous Cluster of given cluster
498     * @param cluster Cluster whose previous cluster is required
499     * @return previous Cluster
500     */

501
502    Cluster getPreviousCluster(Cluster cluster, _DatabaseUser user) throws
503        DException {
504       if (cluster.getClusterAddress() ==
505           firstClusterCharacteristics.getStartAddress())
506          return null;
507       ClusterCharacteristics previous = cluster.getPreviousClusterCharacteristics();
508
509       return previous == null ? null : user == null ? getClusterForRead(previous)
510           : getClusterForWrite(user, previous);
511    }
512
513    /**
514     * To insert Record which could not be updated in its current cluster
515     *
516     * @param user to commit current bytes in databasefile
517     * @param recordCluster To Perform write operations on cluster
518     * @param columnValues values which has to insert
519     *
520     * @return tableKey at which Record is inserted
521     */

522
523    private Object JavaDoc insertOld(_DatabaseUser user, _RecordCluster recordCluster,
524                             Object JavaDoc columnValues) throws DException {
525       Cluster cluster = getLastCluster(user);
526       recordCluster.setCluster(cluster);
527       int beforeInsert_RecordCount = recordCluster.getRecordCount();
528       int startColumnIndex = 0;
529       short recordNumber = -1;
530       short exactRecordNumber = -1;
531       Cluster exactCluster = null;
532       int[] result;
533       while (true) {
534          result = recordCluster.insert( (byte[]) columnValues, startColumnIndex, true);
535          if (result[0] == _RecordCluster.SUCCESSFUL) {
536             Cluster cls = recordCluster.getCluster();
537             recordNumber = recordNumber == -1 ? recordCluster.getRecordCount() : 1;
538             lastClusterCharacteristics = cls.clusterCharacteristics;
539             persistentDatabase.updateWriteClusters(cls);
540             break; // insert successful;
541
}
542          if (startColumnIndex == 0 && result[0] == _RecordCluster.PARTIAL) {
543             exactRecordNumber = recordNumber == -1 ?
544                 (short) (beforeInsert_RecordCount + 1) : recordNumber;
545             exactCluster = recordCluster.getCluster();
546          }
547          Cluster tempCluster1 = persistentDatabase.getNewCluster(user, tableName);
548          removeClusterFromMap(tempCluster1.getClusterAddress());
549          recordCluster.getCluster().setNextCluster(tempCluster1);
550          persistentDatabase.updateWriteClusters(recordCluster.getCluster());
551          persistentDatabase.getCluster(user, firstClusterCharacteristics).
552              setLastCluster(tempCluster1.getClusterCharacteristics());
553          recordCluster.setCluster(tempCluster1);
554          beforeInsert_RecordCount = recordCluster.getRecordCount();
555          if (result[0] == _RecordCluster.PARTIAL) {
556             startColumnIndex = result[1];
557          }
558       }
559       exactRecordNumber = startColumnIndex == 0 ? recordNumber :
560           exactRecordNumber;
561       exactCluster = startColumnIndex == 0 ? recordCluster.getCluster() :
562           exactCluster;
563       return new TableKey(exactCluster.clusterCharacteristics.getStartAddress(),
564                           exactRecordNumber);
565    }
566
567    /**
568     * To get a cluster for read.
569     * @param clusterCharacteristics ClusterCharacteristics whose cluster is to b geted.
570     * @throws DException
571     * @return Cluster
572     */

573    Cluster getClusterForRead(ClusterCharacteristics clusterCharacteristics) throws
574        DException {
575       return persistentDatabase.getClusterForRead(clusterCharacteristics,
576                                                   clusterCharacteristics.
577                                                   isBtreeCluster);
578    }
579    /**
580     * To get a cluster for write
581     * @param user _DatabaseUser
582     * @param clusterCharacteristics ClusterCharacteristics whose cluster to be geted.
583     * @throws DException
584     * @return Cluster
585     */

586    Cluster getClusterForWrite(_DatabaseUser user,
587                               ClusterCharacteristics clusterCharacteristics) throws
588        DException {
589       return persistentDatabase.getClusterForWrite(user, clusterCharacteristics);
590    }
591
592    /**
593     * To get a record values.
594     * @param tableKey TableKey - whose bytes to be geted.
595     * @param recordCluster _RecordCluster - record cluster for that record.
596     * @throws DException
597     * @return BufferRange[] - bytes of that record.
598     */

599    public Object JavaDoc getColumnValues(TableKey tableKey,
600                                         _RecordCluster recordCluster) throws
601        DException {
602       if (checkClusterInMap(tableKey.getStartAddress())) {
603         throw StaticExceptions.RECORD_DELETED_EXCEPTION ;
604       }
605       Cluster cluster = (Cluster)tableKey.cluster.get() ;
606       cluster = cluster == null ? getClusterForRead(new ClusterCharacteristics(tableKey.getStartAddress(), false)) : cluster ;
607       recordCluster.setCluster(cluster);
608       try {
609          return recordCluster.retrieveBufferRange(tableKey.getRecordNumber());
610       } catch (DException de) {
611          if (de.getDseCode().equalsIgnoreCase("DSE2006")) {
612             Object JavaDoc[] parameters = de.getParameters(); // length of oldRecord and free space in that cluster
613
byte[] recordBytes = ( (BufferRange) parameters[0]).getBytes();
614             boolean isRecordBytesComplete = false;
615             byte[] temp;
616             Object JavaDoc[] result;
617             while (!isRecordBytesComplete) {
618                cluster = getNextCluster(cluster, null);
619                recordCluster.setCluster(cluster);
620                result = recordCluster.partialRetrieve( (short) 1);
621                temp = (byte[]) result[0];
622                recordBytes = append(recordBytes, temp);
623                isRecordBytesComplete = ( (byte[]) result[1])[0] ==
624                    versionHandler.COMPLETE;
625             }
626             recordCluster.setCluster(null);
627             return new BufferRange(recordBytes, 0, recordBytes.length);
628          }
629          if (de.getDseCode().equalsIgnoreCase("DSE2005")) {
630             TableKey tk = (TableKey) de.getParameters()[0];
631             return getColumnValues(tk, recordCluster);
632          }
633          throw de;
634       }
635    }
636    /**
637     * To initialize last cc of a table
638     * @throws DException
639     */

640    private void initializeLastCC() throws DException {
641       ClusterCharacteristics cc = persistentDatabase.getClusterForRead(
642           firstClusterCharacteristics, firstClusterCharacteristics.isBtreeCluster).
643           getPreviousClusterCharacteristics();
644       lastClusterCharacteristics = cc == null ? firstClusterCharacteristics : cc;
645    }
646
647    /**
648     * Methods Written Below are Not Used Now
649     */

650
651    public void rollBack() throws DException {
652       lastClusterCharacteristics = persistentDatabase.
653           getLastClusterClaracteristics(tableName);
654    }
655
656    public Object JavaDoc getColumnValues(TableKey key, _RecordCluster recordCluster,
657                                       int column) throws DException {
658       throw new UnsupportedOperationException JavaDoc("method can't be supported");
659    }
660
661    public Object JavaDoc getColumnValues(TableKey key,
662                                         _RecordCluster recordCluster,
663                                         int[] columns) throws DException {
664       throw new UnsupportedOperationException JavaDoc("method can't be supported");
665    }
666
667
668    public void removeClusterFromMap(int key) {
669       freeClusterMap.remove(new Integer JavaDoc(key));
670    }
671
672    public boolean checkClusterInMap(int key) {
673       return freeClusterMap.size() > 0 ? freeClusterMap.containsKey(new Integer JavaDoc(key)) : false ;
674    }
675
676 /*
677       private short getWrittenLengthInThisCluster(Cluster cluster,
678                                                   short recordNumber) throws
679           DException {
680          short insertableAddress = CCzufDpowfsufs.getShortValue(cluster.getBytes(), 0);
681          short length = 0;
682          short start = cluster.getStartPointerOfRecord(recordNumber);
683          if (recordNumber == cluster.actualRecordCount) {
684             length = (short) (insertableAddress - start - 2 - 4);
685          } else {
686             length = (short) (cluster.getStartPointerOfRecord( (short) (recordNumber +
687                 1)) - start - 2 - 4);
688          }
689          return length;
690       }
691
692        */

693 }
694
// Popular Tags