KickJava   Java API By Example, From Geeks To Geeks.

Java > Open Source Codes > com > daffodilwoods > daffodildb > server > datasystem > persistentsystem > LobManager


1 package com.daffodilwoods.daffodildb.server.datasystem.persistentsystem;
2
3 import java.io.*;
4
5 import com.daffodilwoods.daffodildb.server.datasystem.interfaces.*;
6 import com.daffodilwoods.daffodildb.utils.byteconverter.*;
7 import com.daffodilwoods.database.resource.*;
8 import com.daffodilwoods.daffodildb.server.datasystem.persistentsystem.
9
    versioninfo.VersionHandler;
10 import com.daffodilwoods.daffodildb.server.datasystem.utility.
11
    SimpleFIFOReadWriteLocker;
12
/**
 * Insert, update and retrieve operations on Blob/Clob datatype columns are managed by LobManager.
 * It provides clusters to write bytes with the help of the database and maintains a linked list of
 * clusters. For retrieval it provides the address of the first cluster.
 */

18 public class LobManager {
19
20   /**
21    * To get Cluster
22    */

23
24   public PersistentDatabase database;
25
26   /**
27    * start cluster addresses for blob clob columns of a table.
28    */

29
30   private int[] startAddresses;
31
32   /**
33    * last cluster addresses for blob clob columns of table.
34    */

35
36   private int[] lastAddresses;
37   /**
38    * It is used to get Cluster Size of database.
39    */

40   DatabaseProperties databaseProperties;
41   /**
42    * it is used to get new address length , length etc. other constants.
43    */

44   VersionHandler versionHandler;
45   /**
46    * Maximum Bytes that can be inserted for a record into this cluster.
47    * by reserving some bytes for update,its header and next cluster address.
48    */

49   private int MAXLENGTHFORFULLINSERT;
50   /**
51    * For locking of table.
52    */

53   SimpleFIFOReadWriteLocker lock;
54   /**
55    * Doesn't used here but used in dblob class.
56    */

57   static Object JavaDoc monitor = new Object JavaDoc();
58
59   private boolean isTempDatabase;
60
61   public LobManager(PersistentDatabase database0, int[] startAddresses0) throws
62       DException {
63     database = database0;
64     isTempDatabase = database.getDatabaseName().equalsIgnoreCase(DatabaseConstants.TEMPDATABASE);
65     versionHandler = database.getVersionHandler();
66     databaseProperties = database.getDatabaseProperties();
67     MAXLENGTHFORFULLINSERT = databaseProperties.CLUSTERSIZE -
68         versionHandler.CLUSTER_STARTPOINTER - versionHandler.NEWADDRESSLENGTH -
69         versionHandler.BLOBBYTESFORUPDATE - versionHandler.BLOBROWHEADER;
70     startAddresses = startAddresses0;
71     initializeLastClusterAddresses();
72     lock = new SimpleFIFOReadWriteLocker();
73   }
74   /**
75    * It initialize last cluster address of all blob clob records by getting previouse
76    * of start cluster address.
77    * @throws DException
78    */

79   private void initializeLastClusterAddresses() throws DException {
80     int len = startAddresses.length;
81     lastAddresses = new int[len];
82     for (int i = 0; i < len; i++) {
83       if (startAddresses[i] != -1) {
84         Cluster cls = database.getClusterForRead(new ClusterCharacteristics(
85             startAddresses[i], false), false);
86         lastAddresses[i] = CCzufDpowfsufs.getIntValue(cls.getBytes(),
87             3 * versionHandler.LENGTH);
88       }
89     }
90   }
91
92   /**
93    * It gets all bytes of column from _LobUpdatable, then it gets Clusters from database to write bytes
94    * It maintains LinkList for all clusters, it sets first cluster address in _Lobupdatable and returns
95    * _LobUpdatable
96    *
97    * @param user To perform write operations
98    *
99    * @param dBlobUpdatable _LobUpdatable from which bytes of insertion are got
100    *
101    * @return _LobUpdatable in which first cluster address is set
102    */

103
104   public Object JavaDoc insertBlobBytes(_DatabaseUser user, Object JavaDoc dBlobUpdatable,
105                                 int columnIndex) throws DException {
106     try {
107       lock.lockTable();
108       return insertBlobBytesWithoutLock(user, dBlobUpdatable,columnIndex);
109     }
110      finally {
111       lock.releaseTable();
112     }
113
114   }
115
116   /**
117    * add another entry for newly inserted record
118    */

119
120   private void updateColumnPositions(Cluster cluster) throws DException {
121     cluster.addNewEntry();
122   }
123
124   /**
125    * for inserting bytes of a new record, we required at least 9 bytes.
126    * 1 : ACTIVE/DELETED
127    * 1 : FULL/PARTIAL
128    * 4 : LENGTH OF BLOB/CLOB COLUMN
129    * 1 : AT LEAST ONE DATA BYTES SHOULD RESIDE IN THIS CLUSTER
130    * 2 : START POINTER OF THE COLUMN BYTES IN THIS CLUSTER
131    * @param freeSpaceInCluster : free space in this cluster
132    * @return true : if cluster has a min space of 9 bytes
133    */

134
135   private boolean canWrite(int freeSpaceInCluster) {
136     return freeSpaceInCluster > 1 + 1 + 4 + 1 + 2;
137   }
138
139   /**
140    * canWrite method calculates the free space present in the cluster
141    * @param freeSpaceInCluster - free space of cluster.
142    * @param lengthOfBytes - length of bytes to be inserted.
143    * @return - true if we can write only even blob row header other wise false
144    */

145   private boolean canWrite(int freeSpaceInCluster, int lengthOfBytes) {
146     ; //freeSpaceInCluster > 1 + 1 + 4 + 1 + 2; // ACTIVE/DELETED + FULL/PARTIAL + INTSIZE + one data byte + DATA LENGTH IN THIS CLUSTER
147
if (lengthOfBytes <= MAXLENGTHFORFULLINSERT)
148       return freeSpaceInCluster >=
149           (lengthOfBytes + versionHandler.BLOBROWHEADER) ? true : false;
150     else
151       return freeSpaceInCluster > versionHandler.BLOBROWHEADER;
152   }
153   /**
154    * To get free space for insert of a record, we calculate it by subtracting
155    * space used and new address length , update bytes,header, recordInsertableAddress pointer etc.
156    * @param cluster Cluster - cluster whoose free space is to be calculated for insertion of a record.
157    * @return int - free space in it.
158    */

159   public int getFreeSpaceForInsert(Cluster cluster) {
160     short spaceUsed = CCzufDpowfsufs.getShortValue(cluster.getBytes(), 0);
161     return (int) (databaseProperties.CLUSTERSIZE - spaceUsed -
162                        versionHandler.NEWADDRESSLENGTH -
163                        cluster.actualRecordCount * versionHandler.LENGTH -
164                        versionHandler.BLOBBYTESFORUPDATE -
165                        versionHandler.BLOBROWHEADER - versionHandler.LENGTH - 1);
166   }
167
168   /**
169    * when data is in the form of stream , then it retrieves all bytes from stream and inserts in clusters.
170    * it sets first cluster address in _Lobupdatable and returns _LobUpdatable
171    *
172    * @param user To perform write operations
173    *
174    * @param dBlobUpdatable _LobUpdatable from which bytes of insertion are got
175    *
176    * @return _LobUpdatable in which first cluster address is set
177    */

178
179   private Object JavaDoc insert(_DatabaseUser user, _LobUpdatable blobUpdatabale,
180                         int columnIndex) throws DException {
181     try {
182       InputStream stream = blobUpdatabale.getStream();
183       int lastClusterId = lastAddresses[columnIndex];
184       Cluster firstCluster = database.getCluster(user,new ClusterCharacteristics(startAddresses[
185           columnIndex], false));
186       ClusterCharacteristics lastCC = new ClusterCharacteristics(lastClusterId, false);
187       Cluster lastCluster = database.getCluster(user, lastCC);
188       Cluster previousCluster = null;
189       int startAddress = -1;
190       int totalLength = 0;
191       int freeSpaceInCluster = getFreeSpaceForInsert(lastCluster);
192       short recordId = (short) (lastCluster.getActualRecordCount() + 1);
193       short actualRecordId = recordId;
194       if (!canWrite(freeSpaceInCluster)) { // 4 : ACTIVE/DELETED,FULL/PARTIAL,LENGTH,MIN ONE BYTE,START POINTER
195
Cluster nextCluster = database.getNewCluster(user, null);
196         lastCC= nextCluster.getClusterCharacteristics();
197         lastCluster.setNextCluster(nextCluster);
198         manageClusters( user,lastCluster); // database.updateWriteClusters(lastCluster);
199
lastCluster = nextCluster;
200         lastAddresses[columnIndex] = lastCC.getStartAddress();
201         firstCluster.setLastCluster(lastCC);
202         freeSpaceInCluster = getFreeSpaceForInsert(lastCluster);
203         actualRecordId = recordId = 1;
204       }
205       ClusterCharacteristics totalLengthCC = lastCluster.
206           getClusterCharacteristics();
207       startAddress = lastCC.getStartAddress();
208       byte[] columnBytes = new byte[freeSpaceInCluster];
209       int totalAvailableBytesInStream = stream.available();
210       int readBytesLength = totalAvailableBytesInStream == 0 ? 0 :
211           stream.read(columnBytes);
212       int cnt = -1;
213
214       while (true) {
215         ++cnt;
216         byte[] bytes = lastCluster.getBytes();
217         short insertableAddress = CCzufDpowfsufs.getShortValue(bytes, 0);
218         int pointer = insertableAddress;
219          totalLength += readBytesLength;
220         lastCluster.updateByte(pointer++, versionHandler.ACTIVE);
221         lastCluster.updateByte(pointer++,
222                                totalLength == totalAvailableBytesInStream ? cnt > 0 ?
223                                versionHandler.COMPLETE : versionHandler.FULL :
224                                versionHandler.PARTIALLY);
225         pointer += 4;
226         System.arraycopy(columnBytes, 0, bytes, pointer, readBytesLength);
227         lastCluster.actualRecordCount++;
228         lastCluster.activeRecordCount++;
229         int recordStartPointer = databaseProperties.CLUSTERSIZE -
230             versionHandler.NEWADDRESSLENGTH -
231             lastCluster.actualRecordCount * versionHandler.LENGTH;
232         lastCluster.updateBytes(recordStartPointer,
233                                 CCzufDpowfsufs.getBytes(insertableAddress));
234         updateColumnPositions(lastCluster);
235         lastCluster.updateColumnPositions( -1, insertableAddress);
236         lastCluster.updateClusterInformation( (short) (pointer +
237             readBytesLength));
238
239         if (totalLength == totalAvailableBytesInStream) {
240           manageClusters(user,lastCluster); // database.updateWriteClusters(lastCluster);
241
break;
242         }
243         previousCluster = lastCluster;
244         lastCluster = database.getNewCluster(user, null);
245         lastCC= lastCluster.getClusterCharacteristics();
246         previousCluster.setNextCluster(lastCluster);
247         manageClusters(user,previousCluster); //database.updateWriteClusters(previousCluster);
248
firstCluster.setLastCluster(lastCC);
249         lastAddresses[columnIndex] = lastCC.getStartAddress();
250         freeSpaceInCluster = getFreeSpaceForInsert(lastCluster);
251         columnBytes = new byte[freeSpaceInCluster];
252         readBytesLength = stream.read(columnBytes);
253       }
254       Cluster cls = database.getClusterForWrite(user, totalLengthCC);
255       int start = cls.getStartPointerOfRecord(actualRecordId);
256       cls.updateBytes(start + versionHandler.LENGTH,
257                       CCzufDpowfsufs.getBytes(totalLength));
258       blobUpdatabale.setStartingClusterAddress(startAddress);
259       blobUpdatabale.setRecordNumber(actualRecordId);
260       return blobUpdatabale;
261     }
262     catch (IOException ioe) {
263       throw new DException("DSE2025", new Object JavaDoc[] {ioe.getMessage()});
264     }
265   }
266
267
268   /**
269    * It constructs DblobUpdatable and Dblob to retrieve bytes. it sets First cluster in DBlob to read
270    * bytes
271    *
272    * @param startClusterAddress address of first cluster
273    *
274    * @return DblobUpdatable after setting first cluster address
275    */

276
277   public Object JavaDoc retrieveDataForBlob(int startClusterAddress, short recordNumber) {
278     DBlob dBlob = new DBlob(new ClusterCharacteristics(startClusterAddress, false), this,
279                             recordNumber);
280     dBlob.setDatabaseProperties(databaseProperties);
281     DBlobUpdatable dBlobUpdatable = new DBlobUpdatable(dBlob);
282     dBlobUpdatable.setStartingClusterAddress(startClusterAddress);
283     dBlobUpdatable.setRecordNumber(recordNumber);
284     return dBlobUpdatable;
285   }
286
287   /**
288    * It constructs DClobUpdatable and DClob to retrieve strings. it sets First cluster in DClob to read
289    * string
290    *
291    * @param startClusterAddress address of first cluster
292    *
293    * @return DClobUpdatable after setting first cluster address
294    */

295
296   public Object JavaDoc retrieveDataForClob(int startClusterAddress, short recordNumber) {
297     DClob dClob = new DClob(new ClusterCharacteristics(startClusterAddress, false), this,
298                             recordNumber);
299     dClob.setDatabaseProperties(databaseProperties);
300     DClobUpdatable dClobUpdatable = new DClobUpdatable(dClob);
301     dClobUpdatable.setStartingClusterAddress(startClusterAddress);
302     dClobUpdatable.setRecordNumber(recordNumber);
303     return dClobUpdatable;
304   }
305
306   /**
307    * Updates old bytes with new bytes. first it use previous clusters but if new bytes size is more than
308    * previous bytes then inserts bytes in new clusters.
309    *
310    * @param user to perform write operations
311    * @param startAddress start cluster address of old inserted bytes
312    * @param dBlobUpdatable _LobUpdatable in which new bytes are set
313    *
314    * @return dBlobUpdatable in which first cluster address is set
315    */

316
317   public Object JavaDoc updateBlob(_DatabaseUser user, int startAddress,
318                            Object JavaDoc dBlobUpdatable, int columnIndex,
319                            short recordNumber) throws DException {
320
321
322       boolean flag = ( (_LobUpdatable) dBlobUpdatable).isStream(); //This check is applied if Object dBlobUpdatable is Stream then call another method updateStream().
323
if (startAddress == 0 && recordNumber == 0) //This check is used to see that if there is no such record present for which update call is given then we just give call to insert method with the updated values
324
return insertBlobBytes(user, (_LobUpdatable) dBlobUpdatable,
325                                columnIndex);
326       if (flag) //This check is applied if Object dBlobUpdatable is Stream then call another method updateStream().
327
return updateStream(user, dBlobUpdatable, startAddress, columnIndex,
328                             recordNumber);
329       byte[] columnBytes = ( (_LobUpdatable) dBlobUpdatable).getBytes();
330       try {
331       lock.lockTable();
332       ClusterCharacteristics firstCC = new ClusterCharacteristics(startAddress, false);
333       Cluster firstCluster = database.getClusterForWrite(user, firstCC);
334       byte[] clusterBytes = firstCluster.getBytes();
335
336       short startPointer = firstCluster.getStartPointerOfRecord(recordNumber);
337       boolean isFull = clusterBytes[startPointer + 1] == versionHandler.FULL;
338       try {
339         if (isFull) {
340           updateIfFullyInserted(firstCluster, columnBytes, recordNumber,
341                                 startPointer, clusterBytes);
342         }
343         else {
344           updateIfPartialyInserted(user, firstCluster, columnBytes,
345                                    recordNumber, startPointer, columnIndex,
346                                    clusterBytes);
347         }
348         ( (_LobUpdatable) dBlobUpdatable).setStartingClusterAddress(
349             firstCluster.getClusterAddress());
350         ( (_LobUpdatable) dBlobUpdatable).setRecordNumber(recordNumber);
351         return dBlobUpdatable;
352       }
353       catch (DException ex) {
354         if(!ex.getDseCode().equalsIgnoreCase("DSE0") )
355           throw ex;
356         deleteBlobWithoutLock(user, startAddress, columnIndex, recordNumber);
357         return insertBlobBytesWithoutLock(user, dBlobUpdatable, columnIndex);
358       }
359
360     }
361     finally {
362       lock.releaseTable();
363     }
364
365   }
366   /**
367    * It is used while we update a partial record.
368    * firstly we check old length in this cluster and get margin also which we have
369    * as update bytes now we calculate bytes which we can adjust in this cluster.
370    * now we check for that is record complete in this cluster than we use
371    * updateBytesOfLastRelatedCluster method.
372    * else it is not complete in it than we update bytes with new bytes
373    * and we check that all of new bytes are replaced in place of old
374    * and we replaces old with new than we call adjustOtherClusters to collect
375    * free space which is remaining in cluster after updation.
376    *
377    *
378    * @param user _DatabaseUser
379    * @param cluster Cluster
380    * @param columnBytes byte[]
381    * @param recordNumber short
382    * @param startPointer short
383    * @param columnIndex int
384    * @param clusterBytes byte[]
385    * @throws DException
386    */

387   private void updateIfPartialyInserted(_DatabaseUser user, Cluster cluster,
388                                         byte[] columnBytes, short recordNumber,
389                                         short startPointer, int columnIndex,
390                                         byte[] clusterBytes) throws DException {
391     int oldLength = CCzufDpowfsufs.getIntValue(clusterBytes, startPointer + 2);
392     int newLength = columnBytes.length;
393     int writtenSize = 0;
394     short margin = 0;
395     int remainingLength = newLength;
396     while (true) {
397       short oldLengthInThisCluster = getWrittenLengthInThisCluster(cluster,
398           recordNumber);
399       margin = newLength <= oldLength ? 0 : getMargin(cluster);
400       int lengthOfAdjustableBytes = remainingLength <=
401           oldLengthInThisCluster + margin ? remainingLength :
402           oldLengthInThisCluster + margin;
403       if (clusterBytes[startPointer + 1] != versionHandler.COMPLETE) { //This check is used to verify if the respective cluster is the last cluster of the partial record which is to be updated
404
System.arraycopy(columnBytes, writtenSize, clusterBytes,
405                          startPointer + 6, lengthOfAdjustableBytes);
406         cluster.updateBytes(startPointer + 2,
407                             CCzufDpowfsufs.getBytes(columnBytes.length));
408         cluster.updateClusterInformation( (short) (startPointer + 6 +
409             lengthOfAdjustableBytes));
410         if (writtenSize + lengthOfAdjustableBytes >= newLength) {
411           cluster.updateByte(startPointer + 1,
412                              writtenSize == 0 ? versionHandler.FULL :
413                              versionHandler.COMPLETE);
414           adjustOtherClusters(user, cluster, (short) 1, columnIndex);
415           manageClusters(user,cluster); // database.updateWriteClusters(cluster);
416
break;
417         }
418       }
419       else {
420         if (remainingLength > lengthOfAdjustableBytes) {
421            throw new DException("DSE0",new Object JavaDoc[]{"Can't Update Here"});
422         }
423         updateBytesOfLastRelatedCluster(cluster, recordNumber, startPointer,
424                                         columnBytes, writtenSize,
425                                         columnBytes.length);
426         break;
427       }
428       writtenSize += lengthOfAdjustableBytes;
429       remainingLength -= lengthOfAdjustableBytes; // before -=writtenSize
430
ClusterCharacteristics nextCC = cluster.getNextClusterCharacteristics();
431       manageClusters(user,cluster); // database.updateWriteClusters(cluster);
432
cluster = database.getClusterForWrite(user, nextCC);
433       recordNumber = 1;
434       clusterBytes = cluster.getBytes();
435       startPointer = cluster.getStartPointerOfRecord(recordNumber);
436     }
437   }
438
439   /**
440    * If record number to be updated is equal to the actualRecordCount then simply updates the values
441    * else updates the record and shifts the rest of the bytes written in this cluster
442    * by the difference in the lenghths of the old and new records and
443    * updates the start pointers of effected record in column positions array and in cluster bytes
444    *
445    * @param cluster
446    * @param recordNumber record numbet to be updated
447    * @param startPointer
448    * @param columnBytes new value of the record
449    * @param start position from where to start writing from the ColumnBytes
450    * @param end upto which position bytes are to be written from ColumnBytes
451    */

452
453   private void updateBytesOfLastRelatedCluster(Cluster cluster,
454                                                short recordNumber,
455                                                short startPointer,
456                                                byte[] columnBytes, int start,
457                                                int end) throws DException {
458     byte[] clusterBytes = cluster.getBytes();
459     int newLength = end - start;
460     if (recordNumber == cluster.actualRecordCount) { // last record updation.. start pointers will not change
461
startPointer += 2; // write the new length
462
cluster.updateBytes(startPointer,
463                           CCzufDpowfsufs.getBytes(columnBytes.length));
464       startPointer += 4;
465       System.arraycopy(columnBytes, start, clusterBytes, startPointer,
466                        newLength); // check it, may be we can avoid System.arrayCopy()
467
startPointer += newLength;
468       cluster.updateClusterInformation(startPointer);
469     }
470     else {
471       short startPointerOfNextRecord = cluster.getStartPointerOfRecord( (short) (
472           recordNumber + 1));
473       short insertableAddress = CCzufDpowfsufs.getShortValue(clusterBytes, 0);
474       int tempLength = insertableAddress - startPointerOfNextRecord;
475       byte[] temp = new byte[tempLength];
476       System.arraycopy(clusterBytes, startPointerOfNextRecord, temp, 0,
477                        tempLength);
478       startPointer += 2;
479       cluster.updateBytes(startPointer,
480                           CCzufDpowfsufs.getBytes(columnBytes.length));
481       startPointer += 4;
482       System.arraycopy(columnBytes, start, clusterBytes, startPointer,
483                        newLength); // check it, may be we can avoid System.arrayCopy()
484
startPointer += newLength;
485       cluster.updateBytes(startPointer, temp);
486
487       short change = (short) (startPointer - startPointerOfNextRecord);
488       insertableAddress += change;
489       cluster.updateClusterInformation(insertableAddress);
490       int recordPointer = databaseProperties.CLUSTERSIZE -
491           versionHandler.NEWADDRESSLENGTH -
492           (recordNumber + 1) * versionHandler.LENGTH;
493       for (int i = recordNumber + 1; i <= cluster.actualRecordCount; i++) {
494         short strt = CCzufDpowfsufs.getShortValue(clusterBytes, recordPointer);
495         short now = (short) (strt + change);
496         cluster.updateBytes(recordPointer, CCzufDpowfsufs.getBytes(now));
497         cluster.updateColumnPositions(i - 1, now);
498         recordPointer -= versionHandler.LENGTH;
499       }
500     }
501   }
502
503   /**
504    * this method is called if the record to be deleted is written partially
505    * and so we have to adjust the other clusters also in which the data of this record is written
506    *
507    * @param user
508    * @param cluster - currently used cluster whose next is to be checked.
509    * @param recordNumber - always 1.
510    * @param columnIndex - column whose clusters are to be adjusted.
511    * @throws DException
512    */

513
514   private void adjustOtherClusters(_DatabaseUser user, Cluster cluster,
515                                    short recordNumber, int columnIndex) throws
516       DException {
517     Cluster currentCluster = cluster;
518     ClusterCharacteristics nextCC = cluster.getNextClusterCharacteristics();
519     if (nextCC == null)
520       return;
521
522     cluster = database.getClusterForWrite(user,
523                                           cluster.getNextClusterCharacteristics());
524     byte[] clusterBytes = cluster.getBytes();
525     short startPointer = cluster.getStartPointerOfRecord(recordNumber);
526     while (clusterBytes[startPointer + 1] != versionHandler.COMPLETE) {
527       nextCC = cluster.getNextClusterCharacteristics();
528       if (nextCC == null)
529         break;
530       database.addFreeCluster(user, cluster.getClusterAddress());
531       cluster = database.getClusterForWrite(user, nextCC);
532       clusterBytes = cluster.getBytes();
533       startPointer = cluster.getStartPointerOfRecord(recordNumber);
534     }
535     if (cluster.activeRecordCount == recordNumber &&
536         cluster.getClusterAddress() != lastAddresses[columnIndex] &&
537         cluster.getClusterAddress() != startAddresses[columnIndex]) {
538       int add = cluster.getClusterAddress();
539       cluster = database.getClusterForWrite(user,
540                                             cluster.getNextClusterCharacteristics());
541       database.addFreeCluster(user, add);
542     }
543     else {
544     }
545     currentCluster.setNextCluster(cluster);
546   }
547
548   /** private Cluster adjustOtherClusters(_DatabaseUser user,Cluster cluster,short recordNumber,int columnIndex) throws DException{
549          ClusterCharacteristics ccc = cluster.getClusterCharacteristics();
550          cluster = database.getClusterForWrite(user,cluster.getNextClusterCharacteristics());
551          byte[] clusterBytes = cluster.getBytes();
552          short startPointer = cluster.getStartPointerOfRecord(recordNumber);
553          while(clusterBytes[startPointer+1] != COMPLETE){
554              int add = cluster.getClusterAddress();
555        ClusterCharacteristics nextCC = cluster.getNextClusterCharacteristics();
556              database.addFreeCluster(user,cluster.getClusterAddress());
557              cluster = database.getClusterForWrite(user,nextCC);
558              clusterBytes = cluster.getBytes();
559              startPointer = cluster.getStartPointerOfRecord(recordNumber);
560          }
561          if(cluster.activeRecordCount == recordNumber && cluster.getClusterAddress() != lastAddresses[columnIndex]){
562              int add = cluster.getClusterAddress();
563              cluster = database.getClusterForWrite(user,cluster.getNextClusterCharacteristics());
564              database.addFreeCluster(user,add);
565          }
566          else{
567              deleteBytesFromThisCluster(user,cluster,recordNumber,columnIndex);
568          }
569          return cluster;
570      }
571    **/

572   /**
573    * Returns the occupied number of bytes by the record in this cluster.
574    */

575
576   private short getWrittenLengthInThisCluster(Cluster cluster,
577                                               short recordNumber) throws
578       DException {
579     short insertableAddress = CCzufDpowfsufs.getShortValue(cluster.getBytes(), 0);
580     short length = 0;
581     short start = cluster.getStartPointerOfRecord(recordNumber);
582     if (recordNumber == cluster.actualRecordCount) {
583       length = (short) (insertableAddress - start - 2 - 4);
584     }
585     else {
586       length = (short) (cluster.getStartPointerOfRecord( (short) (recordNumber +
587           1)) - start - 2 - 4);
588     }
589     return length;
590   }
591
592   /**
593    * Updates the blob bytes by the columnBytes passed if this record is written completely in this cluster
594    * If the new length of the record is less then or equal to the old length then call for
595    * updation is given else it throws exception "Can't Update Here"
596    *
597    * @param firstCluster : cluster for the record number passed
598    * @param columnBytes : new column bytes
599    * @param recordNumber : record number in this cluster
600    * @param startPointer : from where record is started in this cluster
601    *
602    */

603
604   private void updateIfFullyInserted(Cluster firstCluster, byte[] columnBytes,
605                                      short recordNumber, short startPointer,
606                                      byte[] clusterBytes) throws DException {
607     int oldLength = CCzufDpowfsufs.getIntValue(clusterBytes, startPointer + 2);
608     int newLength = columnBytes.length;
609     if (newLength <= oldLength ||
610         newLength <= oldLength + getMargin(firstCluster)) {
611       updateBytesOfLastRelatedCluster(firstCluster, recordNumber, startPointer,
612                                       columnBytes, 0, newLength);
613     }
614     else
615       throw new DException("DSE0",new Object JavaDoc[]{"Can't Update Here"});
616   }
617   /**
618    * Returns space which we have reserved for update bytes and whcih we can adjust in this cluster.
619    * @param cluster Cluster - whose margin is to be geted.
620    * @return short - margin.
621    */

622   private short getMargin(Cluster cluster) {
623     short spaceUsed = CCzufDpowfsufs.getShortValue(cluster.getBytes(), 0);
624     return (short) (databaseProperties.CLUSTERSIZE - spaceUsed -
625                     versionHandler.NEWADDRESSLENGTH -
626                     cluster.actualRecordCount * versionHandler.LENGTH);
627   }
628
629   /**
630    * If data is updated through stream then gets new bytes from stream , first inserts in old cluster and if
631    * needs then uses new clusters.
632    *
633    *
634    * @param user to perform write operations
635    * @param startAddress start cluster address of old inserted bytes
636    * @param dBlobUpdatable _LobUpdatable in which new bytes are set
637    *
638    * @return dBlobUpdatable in which first cluster address is set
639    */

640
641   private Object JavaDoc updateStream(_DatabaseUser user, Object JavaDoc lobUpdatable,
642                              int startAddress, int columnIndex,
643                              short recordNumber) throws DException {
644     deleteBlob(user, startAddress, columnIndex, recordNumber);
645     return insertBlobBytes(user, (_LobUpdatable) lobUpdatable, columnIndex);
646   }
647
648   Cluster getCluster(ClusterCharacteristics cc) throws DException {
649     return database.getClusterForRead(cc, cc.isBtreeCluster);
650   }
651
652   PersistentDatabase getDatabase() {
653     return database;
654   }
655
656   /**
657    * This method deletes the given record from the cluster if the record is written fully or partially
658    * If its Full then we have to delete it from the current cluster only and it doesn't effect any other cluster
659    * in any way while if its partial then other clusters are also to be adjusted accordingly.
660    * @param startAddress
661    * @param columnIndex
662    * @param recordNumber
663    * @throws DException
664    */

665
666   public void deleteBlob(_DatabaseUser user, int startAddress, int columnIndex,
667                          short recordNumber) throws DException {
668     try {
669       lock.lockTable();
670       deleteBlobWithoutLock(user, startAddress, columnIndex,recordNumber);
671     }
672      finally {
673       lock.releaseTable();
674     }
675
676   }
677
  /**
   * Removes one record's bytes from a single cluster.
   * If the record is the last one written in the cluster (recordNumber ==
   * actualRecordCount) only its first byte is marked DELETE and the
   * insertable address is rewound to it. Otherwise the bytes of all later
   * records are shifted down over the deleted record, the active-record
   * count is decremented, and the start pointers of every following record
   * are adjusted both in the cluster's pointer table and in the in-memory
   * column-positions array.
   *
   * @param user         user performing the write operations (unused here
   *                     directly; kept for call-site symmetry — TODO confirm)
   * @param cluster      cluster from which the record is deleted
   * @param recordNumber record number whose bytes are to be removed
   * @param columnIndex  column index of this blob/clob column in the main table
   * @param clusterBytes raw byte image of {@code cluster}
   */
  private void deleteBytesFromThisCluster(_DatabaseUser user, Cluster cluster,
                                          short recordNumber, int columnIndex,
                                          byte[] clusterBytes) throws
      DException {
    short startPointer = cluster.getStartPointerOfRecord(recordNumber);
    // First two bytes of the cluster hold the next insertable address.
    short insertableAddress = CCzufDpowfsufs.getShortValue(clusterBytes, 0);
    if (recordNumber == cluster.actualRecordCount) {
      // Last record in the cluster: mark DELETE and rewind the insert point.
      cluster.updateByte(startPointer++, versionHandler.DELETE);
      cluster.activeRecordCount--;
      cluster.updateClusterInformation( (short) (startPointer));
    }
    else {
      // Middle record: shift every later record's bytes down by the length
      // freed (record bytes + 4-byte length + 2-byte header - the DELETE byte
      // that stays behind).
      short length = getWrittenLengthInThisCluster(cluster, recordNumber);
      short startPointerOfNextRecord = cluster.getStartPointerOfRecord( (short) (
          recordNumber + 1));
      int tempLength = insertableAddress - startPointerOfNextRecord;
      byte[] temp = new byte[tempLength];
      System.arraycopy(clusterBytes, startPointerOfNextRecord, temp, 0,
                       tempLength);

      cluster.updateByte(startPointer++, versionHandler.DELETE);
      cluster.updateBytes(startPointer, temp);

      cluster.activeRecordCount--;
      // 4 = length field, 2 = status bytes, -1 = the retained DELETE marker.
      short change = (short) (length + 4 + 2 - 1);
      insertableAddress -= change;
      cluster.updateClusterInformation(insertableAddress);
      // Fix up the start-pointer table (stored from the cluster end, below
      // the next-address slot) for every record after the deleted one.
      int i = recordNumber + 1;
      int recordPointer = databaseProperties.CLUSTERSIZE -
          versionHandler.NEWADDRESSLENGTH - i * versionHandler.LENGTH;
      for (; i <= cluster.actualRecordCount; i++) {
        short start = CCzufDpowfsufs.getShortValue(clusterBytes, recordPointer);
        short now = (short) (start - change);
        cluster.updateBytes(recordPointer, CCzufDpowfsufs.getBytes(now));
        cluster.updateColumnPositions(i - 1, now);
        recordPointer -= versionHandler.LENGTH;
      }
    }
  }
729
730   public VersionHandler getVersionHandler() {
731     return versionHandler;
732   }
733   /**
734    * It deletes blob clob columns of specified record number.
735    * If startaddress and recordNumber are zero than simply return.
736    * firstly we check that is record completely written if yes than we
737    * delete its bytes from this cluster otherwise after deletion of
738    * bytes from this cluster we call adjust other clusters to reclaim
739    * all other clusters used to store data of this column.
740    * @param user _DatabaseUser
741    * @param startAddress int
742    * @param columnIndex int
743    * @param recordNumber short
744    * @throws DException
745    */

746   public void deleteBlobWithoutLock(_DatabaseUser user, int startAddress, int columnIndex,
747                           short recordNumber) throws DException {
748        if (startAddress == 0 && recordNumber == 0) {
749          return;
750        }
751        ClusterCharacteristics firstCC = new ClusterCharacteristics(startAddress, false);
752        Cluster cluster = database.getClusterForWrite(user, firstCC);
753        byte[] clusterBytes = cluster.getBytes();
754
755        short startPointer = cluster.getStartPointerOfRecord(recordNumber);
756        boolean isFull = clusterBytes[startPointer + 1] == versionHandler.FULL;
757        if (isFull) {
758          deleteBytesFromThisCluster(user, cluster, recordNumber, columnIndex,
759                                     clusterBytes);
760        }
761        else {
762          deleteBytesFromThisCluster(user, cluster, recordNumber, columnIndex,
763                                     clusterBytes);
764            adjustOtherClusters(user,cluster,(short)1,columnIndex);
765        }
766        if (cluster.activeRecordCount == 0 &&
767            cluster.getClusterAddress() != lastAddresses[columnIndex] &&
768            cluster.getClusterAddress() != startAddresses[columnIndex]) {
769          ClusterCharacteristics prev = cluster.getPreviousClusterCharacteristics();
770          if (prev != null) {
771            Cluster previousCluster = database.getClusterForWrite(user, prev);
772            previousCluster.setNextCluster(database.getClusterForWrite(user,
773                cluster.getNextClusterCharacteristics()));
774            database.addFreeCluster(user, cluster.getClusterAddress());
775          }
776        }
777       }
778
779       /**
780        * It gets all bytes of column from _LobUpdatable, then it gets Clusters from database to write bytes
781        * It maintains LinkList for all clusters, it sets first cluster address in _Lobupdatable and returns
782        * _LobUpdatable
783        * If it is a dblob object than it will be returned as it is.
784        * If it is a stream than we get this stream and insert its bytes after getting from stream.
785        * If it contains data as bytes than we calculate free space in last cluster
786        * and if we can write in it than write these bytes and again now while
787        * all bytes doesn't written than we get next clusters and write data into them..
788        * @param user To perform write operations
789        *
790        * @param dBlobUpdatable _LobUpdatable from which bytes of insertion are got
791        *
792        * @return _LobUpdatable in which first cluster address is set
793        */

794
795       public Object JavaDoc insertBlobBytesWithoutLock(_DatabaseUser user, Object JavaDoc dBlobUpdatable,
796                                     int columnIndex) throws DException {
797     _LobUpdatable blobUpdatable = (_LobUpdatable) dBlobUpdatable;
798         if ( ( (DBlobUpdatable) blobUpdatable).isDBlob()) {
799           return blobUpdatable;
800         }
801
802         if (blobUpdatable.isStream()) {
803           return insert(user, blobUpdatable, columnIndex);
804         }
805
806
807           Cluster previousCluster = null;
808           int startAddress = -1;
809           int lengthOfBytes = blobUpdatable.getLength();
810           int lastClusterId = lastAddresses[columnIndex];
811
812           Cluster firstCluster = null; //database.getClusterForWrite(user,new ClusterCharacteristics(startAddresses[columnIndex]));
813

814           ClusterCharacteristics lastCC = new ClusterCharacteristics(lastClusterId, false);
815           Cluster cluster = database.getClusterForWrite(user, lastCC);
816
817           int freeSpaceInCluster = getFreeSpaceForInsert(cluster);
818           short recordId = (short) (cluster.getActualRecordCount() + 1);
819           short actualRecordId = recordId;
820           if (!canWrite(freeSpaceInCluster, lengthOfBytes)) { // 4 : ACTIVE/DELETED,FULL/PARTIAL,LENGTH,MIN ONE BYTE,START POINTER
821
Cluster nextCluster = database.getNewCluster(user, null);
822             lastCC = nextCluster.getClusterCharacteristics();
823             cluster.setNextCluster(nextCluster);
824             manageClusters(user,cluster); // database.updateWriteClusters(cluster);
825
cluster = nextCluster;
826             lastAddresses[columnIndex] = lastCC.getStartAddress();
827            /* done by kuldeep after 3.2*/
828
829             firstCluster = database.getClusterForWrite(user,
830                   new ClusterCharacteristics(startAddresses[columnIndex], false));
831             firstCluster.setLastCluster(lastCC);
832             freeSpaceInCluster = getFreeSpaceForInsert(cluster);
833             actualRecordId = recordId = 1;
834           }
835           startAddress = lastCC.getStartAddress(); // address to set in dblob updatable as starting cluster address
836
int writtenSize = 0;
837           int sizeToWrite = lengthOfBytes;
838           int size = sizeToWrite < freeSpaceInCluster ? sizeToWrite :
839               freeSpaceInCluster;
840           int cnt = -1;
841           int totalBytesWritten = 0;
842           while (true) {
843             cnt++; // for the checking of COMPLETE or FULL
844
byte[] bytes = cluster.getBytes();
845             short insertableAddress = CCzufDpowfsufs.getShortValue(bytes, 0);
846             int pointer = insertableAddress;
847            totalBytesWritten += size;
848             cluster.updateByte(pointer++, versionHandler.ACTIVE);
849             cluster.updateByte(pointer++,
850                                totalBytesWritten == lengthOfBytes ? cnt > 0 ?
851                                versionHandler.COMPLETE : versionHandler.FULL :
852                                versionHandler.PARTIALLY);
853
854             cluster.updateBytes(pointer, CCzufDpowfsufs.getBytes(lengthOfBytes));
855             pointer += 4;
856             System.arraycopy(blobUpdatable.readBytes(writtenSize, size), 0, bytes,
857                              pointer, size);
858             cluster.actualRecordCount++;
859             cluster.activeRecordCount++;
860             int recordStartPointer = databaseProperties.CLUSTERSIZE -
861                 versionHandler.NEWADDRESSLENGTH -
862                 cluster.actualRecordCount * versionHandler.LENGTH;
863             cluster.updateBytes(recordStartPointer,
864                                 CCzufDpowfsufs.getBytes(insertableAddress));
865             updateColumnPositions(cluster);
866             cluster.updateColumnPositions( -1, insertableAddress);
867             cluster.updateClusterInformation( (short) (pointer + size));
868             writtenSize += size;
869             if (writtenSize == lengthOfBytes) {
870               manageClusters(user,cluster);// database.updateWriteClusters(cluster);
871
break;
872             }
873             previousCluster = cluster;
874             cluster = database.getNewCluster(user, null);
875             lastCC = cluster.getClusterCharacteristics();
876             previousCluster.setNextCluster(cluster);
877             if (firstCluster == null)
878               firstCluster = database.getClusterForWrite(user,
879                   new ClusterCharacteristics(startAddresses[columnIndex], false));
880             firstCluster.setLastCluster(lastCC);
881             manageClusters(user,previousCluster);// database.updateWriteClusters(previousCluster);
882
lastAddresses[columnIndex] = lastCC.getStartAddress();
883             freeSpaceInCluster = getFreeSpaceForInsert(cluster) +
884                 versionHandler.BLOBBYTESFORUPDATE; //changed
885
sizeToWrite = lengthOfBytes - writtenSize;
886             size = sizeToWrite < freeSpaceInCluster ? sizeToWrite :
887                 freeSpaceInCluster;
888             recordId = 1;
889           }
890           blobUpdatable.setRecordNumber(actualRecordId);
891           blobUpdatable.setStartingClusterAddress(startAddress);
892           return blobUpdatable;
893
894       }
895       /**
896           * It is used to control number of clusters in memory for temp database
897           * and user database if clusters of user exeeds count 50 than we
898           * write these clusters to file.
899           * we doesn't call updateWriteClusters for temp database because in it
900           * we write these clustes in swap file if limit exceeds now
901           * in case of temp database temp file is null so we directly
902           * transfer them into file.
903           * @param user _DatabaseUser
904           * @param cluster Cluster
905           * @throws DException
906           */

907          public void manageClusters(_DatabaseUser user,Cluster cluster) throws DException{
908            if(isTempDatabase){
909              if(user.getSize() > 50)
910              user.writeToFileWithLock();
911            }
912            else
913               database.updateWriteClusters(cluster);
914
915          }
916
917   /**
918    * Adds this cluster into free cluster list if and only if it is not the first one and not the last one.
919    *
920    * Reason : 1. In very first cluster of this column, we write some extra information.So we can't change our first cluster.
921    * 2. There is no advantage for adding the last cluster as free cluster because in just next insert call we need a cluster.
922    *
923    * @param user : write operations needed a user
924    * @param cluster : cluster whose bytes are totally free
925    * @param columnIndex : column index of this blob clob type of column in main table
926    */

927
928
929   /*************************************************************************************************************************
930    **************************************************************************************************************************
931    **************************************************************************************************************************
932    * Methods written below are used for testing ****************************************************************************/

933
934
935
936
937
938
939   /*************************************************************************************************************************
940    **************************************************************************************************************************
941    **************************************************************************************************************************
942    *************************************************************************************************************************/

943 }
944
Popular Tags