KickJava   Java API By Example, From Geeks To Geeks.

Java > Open Source Codes > com > daffodilwoods > daffodildb > server > datasystem > persistentsystem > PersistentDatabase


1 package com.daffodilwoods.daffodildb.server.datasystem.persistentsystem;
2
3 import java.io.*;
4 import java.lang.ref.*;
5 import java.util.*;
6 import java.util.Map JavaDoc.*;
7
8 import com.daffodilwoods.daffodildb.server.datadictionarysystem.*;
9 import com.daffodilwoods.daffodildb.server.datasystem.interfaces.*;
10 import com.daffodilwoods.daffodildb.server.datasystem.persistentsystem.versioninfo.*;
11 import com.daffodilwoods.daffodildb.utils.byteconverter.*;
12 import com.daffodilwoods.database.general.*;
13 import com.daffodilwoods.database.resource.*;
14
15 public class PersistentDatabase
16     implements DatabaseConstants, _Database, _ClusterProvider {
17
18   /**
19    * Main file for any Database in which data is permanantly stored
20    */

21   private DRandomAccessFileUpto3_2 databaseFile;
22
23   /**
24    * Provides clusters for read and write operations
25    */

26
27   private ClusterManager clusterManager;
28
29   /**
30    * Very First ClusterCharacteristics.Very First CLuster is used for storing the information of SystemTables.
31    */

32
33   private ClusterCharacteristics startClusterCharacteristics;
34
35   /**
36    * Used for maintaining the table level operations like creating a new table,dropTable etc.
37    */

38
39   private TableManager tableManager;
40
41   /**
42    * Name of The Database
43    */

44
45   private String JavaDoc databaseName;
46
47   /**
48    * For reusing the free space in clusters because of deletion.
49    */

50
51   private FreeSpaceManager freeSpaceManager;
52
53   /**
54    * For maintaing the lock on Database User
55    */

56   private UserLock userLock;
57
58   /**
59    * Next available address of database File.
60    */

61   private int nextAvailableAddress;
62
63   /**
64    * Manages all the power files creted for different users working simultaneously on the database.
65    */

66
67   private PowerFileFactoryUpto3_2 powerFileFactory;
68
69   /**
70    * May be we can remove this.Check it.used in equals method of DatabaseUser
71    */

72
73   private long userCode;
74
75   /**
76    * True means power file is used.
77    */

78
79   private boolean writeInPowerFile;
80
81   /**
82    * Contains the properties of this dataabase like CLUSTERSIZE etc.
83    */

84
85   public DatabaseProperties databaseProperties;
86
87   /**
88    * doesn't known.
89    */

90
91   protected static String JavaDoc DEFAULTBTREEUPDATE = "true";
92
93   /**
94    * Is database file is closed or not.
95    */

96   private boolean closed = false;
97
98   /**
99    * version handler to get constants like length etc.
100    */

101   VersionHandler versionHandler;
102
103   /**
104    * Path for database.
105    */

106   private String JavaDoc daffodilHome;
107   /**
108    * For database mode as read only.
109    */

110
111    private boolean isReadOnlyMode = false;
112
113    /**
114     * It perform all data base level operation's like create ,drop,alter table etc.
115     *
116     * @param file0 DRandomAccessFileUpto3_2
117     * @param databaseName0 String
118     * @param versionHandler0 VersionHandler
119     * @param daffodilHome0 String
120     * @param isReadOnlyMode0 boolean
121     * @throws DException
122     */

123    public PersistentDatabase(DRandomAccessFileUpto3_2 file0, String JavaDoc databaseName0,
124                             VersionHandler versionHandler0,String JavaDoc daffodilHome0,boolean isReadOnlyMode0) throws DException {
125     isReadOnlyMode = isReadOnlyMode0;
126     databaseFile = file0;
127     databaseName = databaseName0;
128     String JavaDoc databaseURL = file0.getURL();
129     versionHandler = versionHandler0;
130     databaseProperties = versionHandler.getDatabaseProperties();
131     tableManager = new TableManager(this, databaseProperties, versionHandler);
132     userCode = Long.MIN_VALUE;
133     startClusterCharacteristics = new ClusterCharacteristics(0, false);
134     userLock = new UserLock();
135     daffodilHome = daffodilHome0;
136     String JavaDoc path = (daffodilHome + File.separator +
137                    "daffodildb.ini").toLowerCase();
138     File f = new File(path);
139     if (f.exists()) {
140       try {
141         InputStream is = new FileInputStream(f);
142         Properties p = new Properties();
143         p.load(is);
144         String JavaDoc pfProperty = ("powerfile." + databaseName).trim().toLowerCase();
145         String JavaDoc str = p.getProperty(pfProperty);
146         DEFAULTBTREEUPDATE = p.getProperty("defaultbtreeupdate", "true");
147         writeInPowerFile = str == null ? true : str.equalsIgnoreCase("true");
148         String JavaDoc clusterProperty = ("clusters.write."+databaseName).toLowerCase();
149         String JavaDoc clusterInMemory = p.getProperty(clusterProperty);
150
151         if(clusterInMemory != null){
152          int writeClusters = Integer.parseInt(clusterInMemory);
153           databaseProperties.setClusterInMemoryForWrite(writeClusters < 75 ? 75 : writeClusters) ;
154         }
155         clusterProperty = ("clusters.read."+databaseName).toLowerCase();
156         clusterInMemory = p.getProperty(clusterProperty);
157         if(clusterInMemory != null){
158          int readClusters = Integer.parseInt(clusterInMemory);
159           databaseProperties.setClusterstInMemoryForRead(readClusters < 75 ? 75 : readClusters);
160         }
161
162
163       }
164       catch (Exception JavaDoc ex) {
165         throw new DException("DSE0", new Object JavaDoc[] {ex.getMessage()});
166       }
167     }
168     else
169       writeInPowerFile = true;
170     databaseProperties.setDatabaseName(databaseName);
171
172
173     init(databaseURL, true);
174
175   }
176
177   public PersistentDatabase(DRandomAccessFileUpto3_2 file0, String JavaDoc databaseName0,
178                             boolean flag, VersionHandler versionHandler0,String JavaDoc daffodilHome0) throws
179   DException {
180     databaseFile = file0;
181     databaseName = databaseName0;
182     String JavaDoc databaseURL = file0.getURL();
183     versionHandler = versionHandler0;
184     databaseProperties = versionHandler.getDatabaseProperties();
185     tableManager = new TableManager(this, databaseProperties, versionHandler);
186     userCode = Long.MIN_VALUE;
187     startClusterCharacteristics = new ClusterCharacteristics(0, false);
188     writeInPowerFile = flag;
189     userLock = new UserLock();
190     daffodilHome = daffodilHome0;
191     databaseProperties.setDatabaseName(databaseName);
192     databaseProperties.setClusterstInMemoryForRead(200);
193     databaseProperties.setClusterInMemoryForWrite(1000);
194     init(databaseURL, false);
195
196   }
197
198   /**
199    * Call of this method means user requires a cluster to write it's data.This method firstly check in FreeClusterList.If found any
200    * entry in it then reuse the cluster otherwise create a new cluster characteristics.
201    *
202    * Initialize the default header of cluster ( Informations like insertable address,active record count, actual record count etc. )
203    *
204    * Update the next available address in database file
205    *
206    * @param user : to perform write operations on cluster
207    * @param tableName : table which requires a new cluster.
208    * @param isBTreeCluster : wheter cluster is BTree cluster or table cluster
209    * @return : ClusterCharacteristics
210    */

211
212   ClusterCharacteristics createClusterCharacteristics(
213       _DatabaseUser user, QualifiedIdentifier tableName) throws DException {
214     return createClusterCharacteristics(user, tableName, false);
215   }
216
217   synchronized ClusterCharacteristics createClusterCharacteristics(
218       _DatabaseUser user, QualifiedIdentifier tableName, boolean isBTreeCluster) throws
219   DException {
220     int nextCluster = ( (TableManager) tableManager).checkFreeClusterList(user);
221     ClusterCharacteristics cla;
222     Cluster cluster;
223     if (nextCluster == -1) {
224       nextCluster = nextAvailableAddress;
225       nextAvailableAddress += 1;
226       updateNextAvailableAddress(); //check required only if new cluster taken
227
}
228     cla = new ClusterCharacteristics(nextCluster, isBTreeCluster);
229     cluster = clusterManager.getNewClusterForWrite(user, cla, isBTreeCluster);
230     updateNextInsertableAddressOfCluster(tableName, user, cluster);
231     cluster.initializeParameters();
232     return cla;
233   }
234
235   /**
236    * gives call to the method FixedRecordCluster in case of fixedTable if the record can be inserted fully in
237    * a single cluster else gives call to the method PartialFixedRecordCluster and in case of
238    * variableTable gives call to the method VariableRecordCluster
239    */

240
241   DatabaseUserTable getDatabaseUserTable(QualifiedIdentifier tableName,
242       _TableList table) throws DException {
243     TableProperties tableProperties = table.getTableProperties();
244     _RecordCluster recordCluster = versionHandler.getRecordCluster(tableProperties, databaseProperties);
245     return new DatabaseUserTable(table, recordCluster);
246   }
247
248   /**
249    * At the time of PersistentDatabase initialization it initializes the Next Available Address.
250    * @throws DException
251    */

252
253   private void initializeNextAvailableClusterAddress() throws DException {
254     nextAvailableAddress = (int) databaseFile.readLong(0);
255   }
256
257   /**
258    * We store the next available addres of database file in Zeroth pointer of database file.It
259    * updates that address whenever it changes.
260    * @param startCluster : Very First Cluster of database file.
261    * @throws DException
262    */

263
264   private void updateNextAvailableAddress() throws DException {
265     databaseFile.writeFirst(0,CCzufDpowfsufs.getBytes( (long)nextAvailableAddress));
266   }
267
268   /**
269    * 30 bytes of every cluster is used for internal handling means these bytes never contains the data bytes.
270    * This method initializes those 30 bytes.Description of these 30 bytes is given below.
271    * 0 - 1 : Insertable Address of this cluster
272    * 2 - 3 : actual record count
273    * 4 - 5 : active record count
274    * 6 - 9 : Previous Cluster Address
275    * 10 - 29 : space left in the header for future use(20 bytes)
276    * CLUSTERSIZE-4 : Next Cluster Address
277    * @param tableName
278    * @param user
279    * @param cluster
280    */

281
282   void updateNextInsertableAddressOfCluster(QualifiedIdentifier tableName,
283       _DatabaseUser user, Cluster cluster) throws
284   DException {
285     byte[] bytes = CCzufDpowfsufs.getBytes(versionHandler.
286         CLUSTER_STARTPOINTER);
287     byte[] zeroShortBytes = CCzufDpowfsufs.getBytes( (short) 0);
288     byte[] zeroIntBytes = CCzufDpowfsufs.getBytes(0);
289     byte[] zerolongBytes = CCzufDpowfsufs.getBytes(0L);
290     cluster.updateBytes(0, bytes);
291     cluster.updateBytes(versionHandler.LENGTH, zeroShortBytes);
292     cluster.updateBytes(2 * versionHandler.LENGTH, zeroShortBytes);
293     cluster.updateBytes(3 * versionHandler.LENGTH, zeroIntBytes);
294     cluster.updateBytes(versionHandler.ADDRESSLENGTH + versionHandler.LENGTH,
295                         zerolongBytes);
296     cluster.updateBytes(2 * versionHandler.ADDRESSLENGTH +
297                         versionHandler.LENGTH, zerolongBytes);
298     cluster.updateBytes(3 * versionHandler.ADDRESSLENGTH +
299                         versionHandler.LENGTH, zeroIntBytes);
300     cluster.updateBytes(databaseProperties.NEXTCLUSTERADDRESS, zeroIntBytes);
301   }
302
303   /**
304    * Persist the cluster bytes of cluster in map,in database file.
305    * @param map : list of clusters
306    c* @throws DException
307    */

308
309   private void writeDataInRespectiveClusters(HashMap map) throws DException {
310     Cluster cluster = null;
311     try {
312       Iterator entryIterator = map.entrySet().iterator();
313       Entry entry;
314       {
315         ClusterCharacteristics cc = null;
316         while (entryIterator.hasNext()) {
317           entry = (Entry) entryIterator.next();
318           cc = (ClusterCharacteristics) entry.getKey();
319           cluster = (Cluster) ( (WeakReference) entry.getValue()).get();
320           if (cluster == null)
321             cluster = getClusterForRead(cc, cc.isBtreeCluster);
322           if (cluster.isDirty()) {
323             long startAddress = (long) cc.getStartAddress() *
324                 (long) databaseProperties.CLUSTERSIZE;
325             databaseFile.write(startAddress, cluster.getBytes());
326             cluster.setDirty(false);
327           }
328           clusterManager.usedClusters.setStatusToRead(cc);
329         }
330       }
331     }
332     catch (Exception JavaDoc ne) {
333       throw new DException("DSE0", new Object JavaDoc[] {ne.getMessage()});
334     }
335   }
336   /**
337    * It returns database user.
338    * @throws DException
339    * @return _DatabaseUser
340    */

341   public _DatabaseUser getDatabaseUser() throws DException {
342     return new DatabaseUser(this, userCode++);
343   }
344
345   /**
346    * It also returns database user but adding these passed tables to user.
347    * so that affected cluster of these tables also added into user's used clusters.
348    *
349    * @param tables ArrayList - list of tables whose clusters are added into users clusters.
350    * @throws DException
351    * @return _DatabaseUser
352    */

353   public _DatabaseUser getDatabaseUser(ArrayList tables) throws DException {
354     return new DatabaseUser(this, userCode++, tables);
355   }
356
357   /**
358    * initializes all the required information like nextAvailableAddress,open the RandomAccessFiles,
359    * initialize System Tables,initialize indexes on system tables,initialize FreeSpaceManager etc.
360    * @param databaseURL
361    * @throws DException
362    */

363
364   private void init(String JavaDoc databaseURL, boolean writeInTempFile) throws
365   DException {
366     initializeFile(databaseURL, writeInTempFile);
367     boolean flag = CCzufDpowfsufs.getBoolean(databaseFile.readFirst(new byte[1],
368         versionHandler.DATABASEINFOSTORED)).booleanValue();
369     if (!flag) {
370       createNewDatabase();
371       freeSpaceManager = new FreeSpaceManager(getTable(SystemTables.
372           FREESPACEINFO));
373       tableManager.initializeSystemTableIndexes(true);
374       freeSpaceManager.setBtree(tableManager.createIndexOnSystemTable(
375           SystemTables.FREESPACEINFO, true));
376     }
377     else{
378       checkDataBase(databaseURL);
379       initializeNextAvailableClusterAddress();
380       versionHandler.initializeSystemTables(tableManager, null,
381           clusterManager.
382           getClusterForReadFirst(
383           startClusterCharacteristics).
384           getBytes(), databaseName);
385       freeSpaceManager = new FreeSpaceManager(getTable(SystemTables.
386           FREESPACEINFO));
387       tableManager.initializeSystemTableIndexes(false);
388         freeSpaceManager.setBtree(tableManager.createIndexOnSystemTable(
389             SystemTables.FREESPACEINFO, false));
390     }
391   }
392
393   /**
394    * open the Database File,Power File and Temporary File for read and write.
395    * @param databaseURL
396    * @throws DException
397    */

398
399   private void initializeFile(String JavaDoc databaseURL, boolean writeInTempFile) throws
400   DException {
401     try {
402       File ff = new File(databaseURL);
403       File ff1 = ff.getParentFile();
404       String JavaDoc str = ff.getName();
405       str = str.substring(0, str.lastIndexOf("."));
406       if (writeInTempFile && !isReadOnlyMode) {
407         ff = new File(ff1,versionHandler.getTempFileName(str));
408         RandomAccessFile tempFile = null;
409          tempFile = new RandomAccessFile(ff, "rw");
410           tempFile.setLength(0);
411         clusterManager = new ClusterManager(tempFile, this, databaseProperties,
412             versionHandler,daffodilHome);
413       }
414       else{
415         clusterManager = new ClusterManager(null, this, databaseProperties,
416                                             versionHandler,daffodilHome);
417       }
418       powerFileFactory = versionHandler.getPowerFileFactory(databaseURL);
419     }
420     catch (IOException fnfe) {
421             throw new DException("DSE2025", new Object JavaDoc[] {fnfe.getMessage()});
422     }
423   }
424
425
426
427
428   private void checkDataBase(String JavaDoc databaseURL) throws DException {
429     if(isReadOnlyMode)
430       return ;
431      try {
432        File ff = new File(databaseURL);
433        File ff1 = ff.getParentFile();
434        File[] files = ff1.listFiles(new PowerFileFilter());
435
436        for (int i = 0; i < files.length; i++) {
437          RandomAccessFile powerFile = new RandomAccessFile(files[i], "r");
438          powerFile.seek(versionHandler.ACTIVE_BYTE_POSITION_IN_POWERFILE);
439          if (powerFile.length() > 0 && powerFile.readByte() == versionHandler.ACTIVE) {
440            int length = powerFile.readInt();
441            int actualClusterWritten = powerFile.readInt() ;
442            byte bytesOfStartAddress[] = new byte[actualClusterWritten*4];
443            powerFile.read(bytesOfStartAddress);
444            byte[] bytes = new byte[databaseProperties.CLUSTERSIZE];
445            powerFile.seek((long)((length*4)+9)) ;
446            for (int j = 0; j < actualClusterWritten; j++) {
447              int stadd = CCzufDpowfsufs.getIntValue(bytesOfStartAddress ,j*4 );
448              powerFile.read(bytes);
449              long startAddress = (long) stadd *
450                                  (long) databaseProperties.CLUSTERSIZE;
451              databaseFile.write(startAddress, bytes);
452            }
453          }
454          powerFile.close();
455          files[i].delete();
456        }
457      }
458      catch (IOException IO) {
459        IO.printStackTrace();
460        throw new DException("DSE2025", new Object JavaDoc[] {IO.getMessage()});
461      }
462    }
463    /**
464     * It is used while creating a new database than to initialize system tables
465     * and to get very first cluster of database and add information into it.
466     * and also write information about database creation.
467     * @throws DException
468     */

469    private void createNewDatabase() throws DException {
470     _DatabaseUser user = getDatabaseUser();
471     Cluster startCluster = clusterManager.getStartClusterForWriteFirst(user,
472         startClusterCharacteristics, false);
473     startCluster.updateBytes(versionHandler.DATABASEINFOSTORED,
474                              CCzufDpowfsufs.getBytes(Utility.getBooleanValue("true") ));
475     nextAvailableAddress = 1;
476     versionHandler.writeInfoForSystemTables(startCluster, this);
477     versionHandler.initializeSystemTables(tableManager, user,
478         startCluster.getBytes(), databaseName);
479     user.writeToFile();
480     updateNextAvailableAddress();
481   }
482   /**
483    * to write information about first and last cluster of all system tables.
484    *
485    * @param index int - index where information of start and last cluster is to be stored.
486    * @param startCluster Cluster - start cluster for the table.
487    * @throws DException
488    */

489   public void writeInfoForSystemTable(int index, Cluster startCluster) throws
490   DException {
491     byte[] bytes = CCzufDpowfsufs.getBytes(new Long JavaDoc(nextAvailableAddress));
492     startCluster.updateBytes(index * versionHandler.ADDRESSLENGTH, bytes);
493     startCluster.updateBytes( (index + 1) * versionHandler.ADDRESSLENGTH, bytes);
494     nextAvailableAddress += 1;
495   }
496
497   /**
498    * It writes all clusters into database file it firstly writes data into
499    * power file if it is on than into main ddb file.
500    * @param map HashMap - It contains all clusters which are affected by user.
501    * @throws DException
502    */

503   void writeAllClusterToFile(HashMap map) throws DException{
504     if (closed)
505       return;
506
507     RandomAccessFile powerFile = null;
508     if (writeInPowerFile) {
509       powerFile = powerFileFactory.getFile(databaseProperties.CLUSTERSIZE,isReadOnlyMode);
510       writeDataInPowerFailureTable(map, powerFile);
511
512       writeDataInRespectiveClusters(map);
513       setPowerFailureByteNonActive(powerFile);
514       deleteDataFromPowerTable(powerFile);
515     }
516     else
517       writeDataInRespectiveClusters(map);
518   }
519
520   /**
521    * Returns a cluster for read from with the help of clusterManager.
522    * @param clusterCharacteristics : clusterCharacteristics for which cluster is needed.
523    * @return : Cluster
524    * @throws DException
525    */

526
527   Cluster getClusterForRead(ClusterCharacteristics clusterCharacteristics,
528                             boolean isBtreeCluster) throws DException {
529     return clusterManager.getClusterForRead(clusterCharacteristics,
530         isBtreeCluster);
531   }
532   /**
533    * It returns a cluster which user wants for write.
534    * @param user _DatabaseUser
535    * @param clusterCharacteristics : clusterCharacteristics for which cluster is needed.
536    * @throws DException
537    * @return Cluster : cluster
538    */

539   Cluster getClusterForWrite(_DatabaseUser user,
540                              ClusterCharacteristics clusterCharacteristics) throws
541   DException {
542     return clusterManager.getClusterForWrite(user, clusterCharacteristics);
543   }
544
545
546 /**
547  * It is called while all data is written in ddb file .
548  *
549  * @param powerFile RandomAccessFile
550  * @throws DException
551  */

552 private void setPowerFailureByteNonActive(RandomAccessFile powerFile) throws
553   DException {
554     try {
555       powerFile.seek(versionHandler.ACTIVE_BYTE_POSITION_IN_POWERFILE);
556       powerFile.write(versionHandler.POWERFILEDELETE);
557     }
558     catch (IOException ex) {
559       throw new DException("DSE0", new Object JavaDoc[] {ex.getMessage()});
560     }
561   }
562
563   /**
564    * To delete data of power file after successfull transfer of data into ddb file.
565    * @param powerFile RandomAccessFile
566    * @throws DException
567    */

568   private void deleteDataFromPowerTable(RandomAccessFile powerFile) throws
569   DException {
570     powerFileFactory.addInFreePowerFileList(powerFile);
571   }
572
573
574   /**
575    * To get system table TableInfo.
576    *
577    * @throws DException
578    * @return _Table
579    */

580   public _Table getTableInfo() throws DException {
581     return tableManager.getTableInfo();
582   }
583   /**
584    * To remove a table from table managers table's map.
585    *
586    * @param tableName QualifiedIdentifier - table to be removed.
587    */

588   public void removeTable(QualifiedIdentifier tableName) {
589     tableManager.removeTable(tableName);
590   }
591
592   /**
593    * To get a table.
594    *
595    * @param tableName QualifiedIdentifier - table name to be retrived.
596    * @param TablesBitSet BitSet
597    * @throws DException
598    * @return _Table
599    */

600   public synchronized _Table getTable(QualifiedIdentifier tableName,
601                                       BitSet TablesBitSet) throws DException {
602     return getTable(tableName);
603   }
604
605
606   /**
607    * While a table is dropped or all records are deleted than we call it to add cluster
608    * into free list so that it can be reused to store data again.
609    * @param user _DatabaseUser
610    * @param address int
611    * @throws DException
612    */

613   public void addFreeCluster(_DatabaseUser user, int address) throws DException {
614
615     ClusterCharacteristics cc = new ClusterCharacteristics(address, false);
616     clusterManager.removeClusterBeforeAddingInFreeList(cc);
617     user.removeCluster(cc);
618     tableManager.addFreeCluster(user, address);
619   }
620
621   /**
622    * Returns name of the database.
623    * @return String
624    */

625   public String JavaDoc getDatabaseName() {
626     return databaseName;
627   }
628
629   /**
630    * Store the information of new table which user want to create with the help of TableManager.
631    * @param tableName - name of table which user wants to create.
632    * @param columnInfo - column informations of the table which is to be created.
633    * @throws DException
634    */

635
636   public synchronized void createTable(QualifiedIdentifier tableName,
637                                        Object JavaDoc columnInfo) throws DException {
638     ColumnInformation ci = columnInfo instanceof TableCharacteristics ?
639                            (ColumnInformation) ( (TableCharacteristics) columnInfo).
640                            getColumnInformation() : (ColumnInformation) columnInfo;
641     tableManager.createTable(tableName, ci);
642   }
643
644   /**
645    * Remove all the information of table which user want to drop with the help of TableManager.
646    * @param tableName
647    * @param columnInfo
648    * @throws DException
649    */

650
651   public synchronized void dropTable(QualifiedIdentifier tableName) throws
652   DException {
653     tableManager.dropTable(tableName);
654   }
655
656   /**
657    * Change the name of table which user want to rename, with the help of TableManager.
658    * @param tableName
659    * @param columnInfo
660    * @throws DException
661    */

662
663   public synchronized void renameTable(QualifiedIdentifier oldTableName,
664                                        QualifiedIdentifier newTableName) throws
665   DException {
666     ( (TableManager) tableManager).renameTable(oldTableName, newTableName);
667   }
668   /**
669    * TO load a table instance into memory when user wants to perform any operation.
670    *
671    * @param tableName QualifiedIdentifier - table name to be geted.
672    * @throws DException
673    * @return _Table
674    */

675   public synchronized _Table getTable(QualifiedIdentifier tableName) throws
676   DException {
677     return tableManager.getTable(tableName);
678   }
679   /**
680    * To read bytes from the ddb file from desired pointer.
681    *
682    * @param startAddress int - pointer from which data is to be read.
683    * @throws DException
684    * @return byte[] - returns data readed from ddb file.
685    */

686   byte[] getBytes(int startAddress) throws DException{
687     long address = (long)startAddress * databaseProperties.CLUSTERSIZE;
688     return databaseFile.read(new byte[databaseProperties.CLUSTERSIZE],
689                              address);
690   }
691   /**
692    * returns temporary user for database.
693    * @throws DException
694    * @return _DatabaseUser
695    */

696   public _DatabaseUser getTempDatabaseUser() throws DException {
697       return new DatabaseUser(this, userCode++, true);
698   }
699
700
701
702   /**
703    * To write data into power file.
704    * In it we make a byte array and store all required info as a pattren for
705    * power file and writting all clusters to be written into bytes array and than
706    * write this byte array into power file.
707    *
708    * @param map HashMap
709    * @param powerFile RandomAccessFile
710    * @throws DException
711    */

712   private void writeDataInPowerFailureTable(HashMap map,RandomAccessFile powerFile) throws
713     DException {
714       Cluster cluster = null;
715       try {
716
717         int length = map.size();
718         byte bytes[] = new byte[length*4+8+1];
719         int index = 0;
720         bytes[index++] = versionHandler.ACTIVE;
721        CCzufDpowfsufs.putInt(bytes,index,length );
722         index += 4;
723         int noOfClustersForWrite = 0;
724         Iterator entryIterator = map.entrySet().iterator();
725             Entry entry;
726             {
727               ClusterCharacteristics cc = null;
728               powerFile.seek(bytes.length);
729               while (entryIterator.hasNext()) {
730                 entry = (Entry) entryIterator.next();
731                 cc = (ClusterCharacteristics) entry.getKey();
732                 cluster = (Cluster) ( (WeakReference) entry.getValue()).get();
733
734                 if (cluster == null)
735                   cluster = clusterManager.getClusterForPowerFile(cc,
736                       cc.isBtreeCluster);
737                 if (!cluster.isDirty()) {
738                   clusterManager.usedClusters.setStatusToRead(cc);
739                 }
740                 else {
741                   noOfClustersForWrite++;
742                   index += 4;
743                   CCzufDpowfsufs.putInt(bytes, index,
744                                        cluster.clusterCharacteristics.getStartAddress());
745                   powerFile.write(cluster.getBytes());
746                 }
747               }
748             }
749         CCzufDpowfsufs.putInt(bytes,5,noOfClustersForWrite );
750          powerFile.seek(versionHandler.ACTIVE_BYTE_POSITION_IN_POWERFILE);
751          powerFile.write(bytes) ;
752       }
753       catch (Exception JavaDoc E) {
754         throw new DException("DSE0", new Object JavaDoc[] {E.getMessage()});
755       }
756     }
757
758
759     /**
760      * To check that cluster limit exceeds or not if yes than write this cluster
761      * into temp file.
762      * @param cluster Cluster
763      * @throws DException
764      */

765
766  public void updateWriteClusters(Cluster cluster) throws DException {
767     clusterManager.updateWriteClusters(cluster);
768   }
769   /**
770    *
771    * @return UserLock
772    */

773   public UserLock getUserLock() {
774     return userLock;
775   }
776
777
778   /**
779    * To write data into file without encrption.
780    *
781    * @param address int - address at which data is to be written.
782    * @param bytes byte[] - bytes to be written.
783    * @throws DException
784    */

785   public void writeInFile(int address, byte[] bytes) throws DException {
786     databaseFile.writeFirst(address, bytes);
787   }
788   /**
789     * To read data into file without de-crption.
790     *
791     * @param address int - address from which data is to be retrived.
792     * @param length int - bytes to be retrived.
793     * @throws DException
794     */

795
796   public byte[] readBytes(int address, int length) throws DException {
797     return databaseFile.readFirst(new byte[length], address);
798   }
799   /**
800    * To read a cluster without de-cryption from given start address.
801    *
802    * @param startAddress int - address from cluster bytes to be read.
803    * @throws DException
804    * @return byte[] - bytes readed.
805    */

806   byte[] getBytesFirst(int startAddress) throws DException {
807     long address = (long)startAddress * databaseProperties.CLUSTERSIZE;
808     return databaseFile.readFirst(new byte[databaseProperties.CLUSTERSIZE],
809                                   address);
810   }
811
812   public ClusterManager getClusterManager() {
813     return clusterManager;
814   }
815   /**
816    * To close all database files.
817    * @throws DException
818    */

819   public void closeFile() throws DException {
820     try {
821       powerFileFactory.close();
822       databaseFile.close();
823       clusterManager.closeFile();
824       closed = true;
825     }
826     catch (IOException ex) {
827       throw new DException("DSE2025", new Object JavaDoc[] {ex.getMessage()});
828     }
829   }
830
831   protected void finalize() throws DException {
832     try {
833       powerFileFactory.close();
834       databaseFile.close();
835       clusterManager.closeFile();
836     }
837     catch (IOException ex) {
838       throw new DException("DSE2025", new Object JavaDoc[] {ex.getMessage()});
839     }
840   }
841   /**
842    * To get a new cluser for write and to be used in btree.
843    *
844    * @param user _DatabaseUser
845    * @throws DException
846    * @return Cluster
847    */

848   public Cluster getNewCluster(_DatabaseUser user) throws
849   DException {
850     return getNewCluster(user, null, true);
851   }
852
853   /**
854    * To get a cluster for write whose cc is passed .
855    *
856    * @param user _DatabaseUser
857    * @param cc ClusterCharacteristics of cluster required.
858    * @throws DException
859    * @return Cluster
860    */

861   public Cluster getCluster(_DatabaseUser user, ClusterCharacteristics cc) throws
862   DException {
863     return clusterManager.getClusterForWrite(user, cc, true);
864   }
865   /**
866    * To get a cluster for read whose cc is passed.
867    *
868    * @param cc ClusterCharacteristics of cluster to be geted .
869    * @throws DException
870    * @return Cluster
871    */

872   public Cluster getReadCluster(ClusterCharacteristics cc) throws DException {
873     return clusterManager.getClusterForRead(cc, true);
874   }
875
876   /**
877    * To filter power files of database.
878    * <p>Title: </p>
879    * <p>Description: </p>
880    * <p>Copyright: Copyright (c) 2003</p>
881    * <p>Company: </p>
882    * @author not attributable
883    * @version 1.0
884    */

885  static class PowerFileFilter
886       implements FilenameFilter {
887     public boolean accept(File dir, String JavaDoc name) {
888       return name.endsWith("PowerFile.log") ? true
889       : false;
890     }
891   }
892
  /** Btree control cluster header layout: starting at offset 30, 4 bytes hold
      the root node address; from offset 34, 8 bytes hold the BTree size. **/

895
896   /**
897    * Returns a cluster charactristics which can be used as btree control cluster.
898    * It gets a new cluster and just initialize its header so as it can be used as control cluster.
899    *
900    * @param user _DatabaseUser
901    * @throws DException
902    * @return ClusterCharacteristics
903    */

904   public ClusterCharacteristics getControlCluster(_DatabaseUser user) throws
905   DException {
906     Cluster cluster = getNewCluster(user, null, true);
907     cluster.updateBytes(3 * versionHandler.ADDRESSLENGTH +
908                         3 * versionHandler.LENGTH, CCzufDpowfsufs.getBytes(0));
909     cluster.updateBytes(4 * versionHandler.ADDRESSLENGTH +
910                         versionHandler.LENGTH,
911                         CCzufDpowfsufs.getBytes( (long) 0));
912     return cluster.getClusterCharacteristics();
913   }
914
915
916   /**
917    *
918    * @return DatabaseProperties
919    */

920   public DatabaseProperties getDatabaseProperties() {
921     return databaseProperties;
922   }
923   /**
924    * To get last cluster chatacteristics of soecified table.
925    *
926    * @param tableName QualifiedIdentifier tableName whose last cc is to be geted.
927    * @throws DException
928    * @return ClusterCharacteristics of last cluster.
929    */

930   ClusterCharacteristics getLastClusterClaracteristics(QualifiedIdentifier
931       tableName) throws DException {
932     return tableManager.getLastClusterClaracteristics(tableName);
933   }
934
935   /**
936    * To get data base file.
937    * @return DRandomAccessFileUpto3_2
938    */

939   public DRandomAccessFileUpto3_2 getDrandomAccessFile() {
940     return databaseFile;
941   }
942
943
944
945 /**
946  * To get all number of clusters used now it is used in get status method which is used for debugging.
947  *
948  * @throws DException
949  * @return double
950  */

951 public double getTotalNumberOfUsedClusters() throws DException {
952     return clusterManager.usedClusters.size();
953   }
954
955   /**
956    * To retun total number of tables currently loaded into memory.
957    * @return HashMap
958    */

959   public HashMap getTablesMap() {
960     return tableManager.getTableMap();
961   }
962
963   public VersionHandler getVersionHandler() {
964     return versionHandler;
965   }
966   /**
967    * To get power file property for write data into power file is true or not.
968    * @return boolean
969    */

970   public boolean getPowerFileOption(){
971     return writeInPowerFile;
972   }
973   /**
974    * To set flag for write data into power file property.
975    * @param flag boolean
976    */

977   public void setWriteInPowerFile(boolean flag){
978     writeInPowerFile = flag;
979   }
980   /**
981    * To get new cluster for a table.
982    *
983    * @param user _DatabaseUser
984    * @param tableName QualifiedIdentifier - table name for which cluster is needed.
985    * @throws DException
986    * @return Cluster
987    */

988   Cluster getNewCluster(_DatabaseUser user, QualifiedIdentifier tableName) throws DException {
989     return getNewCluster(user, tableName, false);
990   }
991   /**
992    * To get a new cluster for the specified table name if it is used in btree or not .
993    * In it we check firstly in free list if there is any than we use it otherwise
994    * a new is created.
995    *
996    * @param user _DatabaseUser
997    * @param tableName QualifiedIdentifier - table name for which cluster is to be needed.
998    * @param isBTreeCluster boolean - is this cluster is used in btree or not.
999    * @throws DException
1000   * @return Cluster - new cluster .
1001   */

1002  synchronized Cluster getNewCluster(_DatabaseUser user, QualifiedIdentifier tableName, boolean isBTreeCluster) throws
1003  DException {
1004    int nextCluster = ( (TableManager) tableManager).checkFreeClusterList(user);
1005    ClusterCharacteristics cla;
1006    Cluster cluster;
1007    if (nextCluster == -1) {
1008      nextCluster = nextAvailableAddress;
1009      nextAvailableAddress += 1;
1010      updateNextAvailableAddress(); //check required only if new cluster taken
1011
}
1012
1013    cla = new ClusterCharacteristics(nextCluster, isBTreeCluster);
1014    cluster = clusterManager.getNewClusterForWrite(user, cla, isBTreeCluster);
1015    updateNextInsertableAddressOfCluster(tableName, user, cluster);
1016    cluster.initializeParameters();
1017    return cluster;
1018  }
1019
1020  /**
1021   * It returens path of the data base.
1022   * @return String
1023   */

1024  public String JavaDoc getDaffodilHome(){
1025  return daffodilHome;
1026}
1027/**
1028 * To show all free clusters of data base.
1029 * @throws DException
1030 */

1031public void showFreeClusterList() throws DException{
1032 tableManager.showFreeClusterList() ;
1033 }
1034 /**
1035  *
1036  * @return FreeClusterList of database.
1037  */

1038 public FreeClusterList getFreeClusterList(){
1039    return tableManager.getFreeClusterList();
1040 }
1041
1042 /*
1043   private void initializeSystemTables(Cluster startCluster, _DatabaseUser user) throws
1044  DException {
1045    tableManager.initializeSystemTables(startCluster, user);
1046  }
1047   private void checkDataBaseOld(String databaseURL) throws DException {
1048     if(isReadOnlyMode)
1049       return ;
1050      try {
1051        File ff = new File(databaseURL);
1052        File ff1 = ff.getParentFile();
1053        File[] files = ff1.listFiles(new PowerFileFilter());
1054
1055        for (int i = 0; i < files.length; i++) {
1056          RandomAccessFile powerFile = new RandomAccessFile(files[i], "r");
1057          powerFile.seek(versionHandler.ACTIVE_BYTE_POSITION_IN_POWERFILE);
1058          if (powerFile.length() > 0 &&
1059              powerFile.readByte() == versionHandler.ACTIVE) {
1060            int length = powerFile.readInt();
1061            byte[] bytes = new byte[databaseProperties.CLUSTERSIZE];
1062            for (int j = 0; j < length; j++) {
1063              int stadd = powerFile.readInt();
1064              powerFile.read(bytes);
1065              long startAddress = (long) stadd *
1066                                  (long) databaseProperties.CLUSTERSIZE;
1067              databaseFile.write(startAddress, bytes);
1068            }
1069          }
1070          powerFile.close();
1071          files[i].delete();
1072        }
1073      }
1074      catch (IOException IO) {
1075        IO.printStackTrace();
1076        throw new DException("DSE2025", new Object[] {IO.getMessage()});
1077      }
1078    }
1079   private void writeDataInPowerFailureTableOld(HashMap map,RandomAccessFile powerFile) throws
1080    DException {
1081
1082
1083      Cluster cluster = null;
1084      try {
1085        ClusterCharacteristics[] cc = (ClusterCharacteristics[]) map.keySet().
1086                                      toArray(new ClusterCharacteristics[0]);
1087        int length = map.size();
1088        powerFile.seek(versionHandler.ACTIVE_BYTE_POSITION_IN_POWERFILE);
1089        powerFile.write(CCzufDpowfsufs.getBytes(new Byte(versionHandler.DELETE)));
1090        powerFile.seek(versionHandler.LENGTH_POSITION_IN_POWERFILE);
1091        powerFile.writeInt(length);
1092        int noOfClustersForWrite = 0;
1093        for (int i = 0; i < map.size(); i++) {
1094          cluster = (Cluster) ( (WeakReference) map.get(cc[i])).get();
1095          if (cluster == null)
1096
1097            cluster = clusterManager.getClusterForPowerFile(cc[i],
1098            cc[i].isBtreeCluster);
1099          if (!cluster.isDirty()) {
1100            map.remove(cc[i]);
1101            clusterManager.usedClusters.setStatusToRead(cc[i]);
1102          }
1103          else {
1104            noOfClustersForWrite++;
1105            powerFile.writeInt(cluster.clusterCharacteristics.getStartAddress());
1106            powerFile.write(cluster.getBytes());
1107          }
1108        }
1109        powerFile.seek(versionHandler.LENGTH_POSITION_IN_POWERFILE);
1110        powerFile.writeInt(noOfClustersForWrite);
1111      }
1112      catch (Exception E) {
1113        throw new DException("DSE0", new Object[] {E.getMessage()});
1114      }
1115    }
1116
1117
1118
1119 private void writeDataInPowerFailureTableUsingByteArray(HashMap map,RandomAccessFile powerFile) throws IOException,DException{
1120   int length = map.size() ;
1121   int clusterSize = databaseProperties.CLUSTERSIZE ;
1122   byte clusterBytes[] = new byte[length*clusterSize+length*4+8+1];
1123   int indexForAddress = 0,indexForClusterBytes = length*4+8+1;
1124
1125   clusterBytes[indexForAddress++] = versionHandler.ACTIVE;
1126        CCzufDpowfsufs.putInt(clusterBytes,indexForAddress,length);
1127        indexForAddress += 4;
1128        int noOfClustersForWrite = 0;
1129        Iterator entryIterator = map.entrySet().iterator();
1130        Entry entry;
1131            {
1132              ClusterCharacteristics cc = null;
1133              Cluster cluster ;
1134              while (entryIterator.hasNext()) {
1135                entry = (Entry) entryIterator.next();
1136                cc = (ClusterCharacteristics) entry.getKey();
1137                cluster = (Cluster) ( (WeakReference) entry.getValue()).get();
1138
1139                if (cluster == null)
1140                  cluster = clusterManager.getClusterForPowerFile(cc,
1141                      cc.isBtreeCluster);
1142                if (!cluster.isDirty()) {
1143                  clusterManager.usedClusters.setStatusToRead(cc);
1144                }
1145                else {
1146                  noOfClustersForWrite++;
1147                  indexForAddress += 4;
1148                  CCzufDpowfsufs.putInt(clusterBytes, indexForAddress,cluster.clusterCharacteristics.getStartAddress());
1149                 System.arraycopy(cluster.getBytes() ,0,clusterBytes,indexForClusterBytes ,clusterSize ) ;
1150                 indexForClusterBytes +=clusterSize;
1151                }
1152              }
1153            }
1154        CCzufDpowfsufs.putInt(clusterBytes,5,noOfClustersForWrite );
1155         powerFile.seek(versionHandler.ACTIVE_BYTE_POSITION_IN_POWERFILE);
1156if( indexForClusterBytes <= 32768 )
1157         powerFile.write(clusterBytes,0,indexForClusterBytes);
1158else{
1159  powerFile.write(clusterBytes,0,32768);
1160    powerFile.write(clusterBytes,32768,indexForClusterBytes-32768);
1161}
1162
1163 }
1164
1165/**
1166 * Check the database code at the time of Database initialization.
1167 * @throws DException
1168 */

1169
1170
1171/**
1172* Check the PowerFailureByte.
1173* If Byte is ACTIVE : Shift the data of Power file in database file.
1174* @throws DException
1175/*
1176  private void setPowerFailureByteActive(RandomAccessFile powerFile) throws
1177  DException {
1178    try {
1179      powerFile.seek(versionHandler.ACTIVE_BYTE_POSITION_IN_POWERFILE);
1180       powerFile.write(versionHandler.POWERFILEACTIVE);
1181    }
1182    catch (IOException ex) {
1183      throw new DException("DSE0", new Object[] {ex.getMessage()});
1184    }
1185  }
1186
1187 /**
1188   * Methods written below are Not Used Now
1189   */

1190
1191
1192/**
1193 * Update the first cluster address of the user table in TableInfo Table
1194 * @param tableName : tableName whose first cluster address is changed now
1195 * @param user : DatabaseUser
1196 * @param startAddress : new address of first cluster of this table
1197 */

1198
1199
1200/**
1201 * Updates the Last Cluster Address in First cluster of database file if the table
1202 * is a System Table else if its user table then give call to the TableManager updateLastRecordAddress
1203 * method where information is updated in TABLEINFO System Table which stores the information about
1204 * all user tables as CATALOGUE,SCHEMA,TABLENAME,STARTCLUSTERADDRESS,LASTCLUSTERADDRESS.
1205 *
1206 * @param tableName : tableName whose last cluster address is changed now
1207 * @param address : new Address of last cluster of this table
1208 */

1209
1210
1211
1212
1213
1214}
1215
Popular Tags