// Source: org.ozoneDB.core.storage.classicStore.PersistenceSpace
// (scraped page banner removed; original site: KickJava — Java API By Example)


// You can redistribute this software and/or modify it under the terms of
// the Ozone Core License version 1 published by ozone-db.org.
//
// The original code and portions created by SMB are
// Copyright (C) 1997-@year@ by SMB GmbH. All rights reserved.
//
// $Id$

package org.ozoneDB.core.storage.classicStore;

import java.io.*;

import org.ozoneDB.DxLib.*;
import org.ozoneDB.OzoneInternalException;
import org.ozoneDB.core.*;
import org.ozoneDB.core.storage.classicStore.ClassicStore;
import org.ozoneDB.core.storage.classicStore.Cluster;
import org.ozoneDB.core.storage.classicStore.ClusterID;
import org.ozoneDB.core.storage.classicStore.DeathObject;
import org.ozoneDB.util.*;
22
23 /**
24  * @author <a HREF="http://www.softwarebuero.de/">SMB</a>
25  */

26 public class PersistenceSpace extends Object JavaDoc {
27     final static String JavaDoc TRANSACTION_FLAG = "transaction";
28     final static int TRANSACTION_FLAG_VERSION = 1;
29     final static int PROPS_FILE_VERSION = 1;
30
31     final static String JavaDoc CID = "ozoneDB.classicStore.clusterID";
32
33     Env env;
34     ClassicStore classicStore;
35
36     Cluster currentCluster;
37     TransactionID currentTransaction;
38     DxSet touchedClusters;
39     DxSet clustersToCompress;
40
41
42     public PersistenceSpace( Env _env ) {
43         env = _env;
44         classicStore = (ClassicStore)env.getStoreManager();
45     }
46
47
48     /**
49      */

50     protected boolean startup() throws Exception JavaDoc {
51         //env.logWriter.newEntry (this, "PersistenceSpace.open", LogWriter.DEBUG);
52
File transFile = new File( env.getDatabaseDir() + Env.DATA_DIR, TRANSACTION_FLAG );
53         if (transFile.exists()) {
54             // we had a crash (transaction abort while commiting):
55
// rollback the transaction to get a consitent database
56
rollBackTransaction( transFile );
57         }
58
59         if (!readProperties()) {
60             // check, if the datadir is empty, i.e. we start the first time
61
String JavaDoc[] list = new File( env.getDatabaseDir() + Env.DATA_DIR ).list();
62             if (list.length != 0) {
63                 recover();
64             } else {
65                 newCluster();
66             }
67         }
68         return true;
69     }
70
71
72     /**
73      */

74     protected boolean shutdown() throws Exception JavaDoc {
75         //env.logWriter.newEntry (this, "PersistenceSpace.close", LogWriter.DEBUG);
76
if (currentCluster != null) {
77             writeProperties();
78             currentCluster.close();
79         }
80
81         currentCluster = null;
82         touchedClusters = null;
83         clustersToCompress = null;
84
85         return true;
86     }
87
88
89     /**
90      */

91     protected boolean readProperties() {
92         ClusterID cid = (ClusterID)env.state.property( CID, null );
93         if (cid == null) {
94             return false;
95         }
96
97         currentCluster = new Cluster( env, classicStore, cid );
98         return true;
99     }
100
101
102     /**
103      */

104     protected void writeProperties() {
105         env.state.setProperty( CID, currentCluster.cluID() );
106     }
107
108
109     /**
110      * begins a transaction commit with setting the transaction label
111      */

112     protected void startTransaction( TransactionID tid ) throws OzoneInternalException {
113         //env.logWriter.newEntry ("PersistenceSpace.beginTransaction: " + tid, LogWriter.DEBUG);
114
currentTransaction = tid;
115         touchedClusters = new DxHashSet();
116         clustersToCompress = new DxHashSet();
117
118         try {
119             // write the transaction flag to harddisk
120
FileOutputStream fo = new FileOutputStream( new File( env.getDatabaseDir() + Env.DATA_DIR, TRANSACTION_FLAG ) );
121             DataOutputStream out = new DataOutputStream( fo );
122             out.writeInt( TRANSACTION_FLAG_VERSION );
123             out.writeLong( currentTransaction.value() );
124             // rescue the current cluster id
125
out.writeLong( currentCluster.cluID().value() );
126             out.close();
127         } catch (IOException e) {
128             throw new OzoneInternalException("Failed to start transaction", e);
129         }
130     }
131
132
133     /** */
134     protected void prepareCommitTransaction( TransactionID tid ) throws OzoneInternalException {
135         try {
136             // close the current cluster stream
137
currentCluster.close();
138
139             // remove now the deleted clusters from disk
140
DxIterator it = clustersToCompress.iterator();
141             // 1 : compress all clusters
142
while (it.next() != null) {
143                 compressCluster( (ClusterID)it.object() );
144             }
145             // 2 : if everything was fine, remove the cluster files
146
it.reset();
147             while (it.next() != null) {
148                 new Cluster( env, classicStore, (ClusterID)it.object() ).removeFromDisk();
149             }
150         } catch (IOException e) {
151             throw new OzoneInternalException("Failed to prepare to commit the transaction", e);
152         }
153     }
154
155
156     /** */
157     protected void commitTransaction( TransactionID tid ) {
158         // remove the transaction label
159
File f = new File( env.getDatabaseDir() + Env.DATA_DIR, TRANSACTION_FLAG );
160         if (f.exists()) {
161             f.delete();
162         }
163
164         //env.logWriter.newEntry ("PersistenceSpace.endTransaction: " + currentTransaction, LogWriter.DEBUG);
165
touchedClusters = null;
166         clustersToCompress = null;
167         currentTransaction = null;
168     }
169
170
171     /** */
172     protected void abortTransaction( TransactionID tid ) {
173     }
174
175
176     /**
177      */

178     private void registerCluster( ClusterID cid ) throws OzoneInternalException {
179         if (!touchedClusters.contains( cid )) {
180             touchedClusters.add( cid );
181
182             try {
183                 // write the cluster id
184
FileOutputStream fo =
185                         new FileOutputStream( new File( env.getDatabaseDir() + Env.DATA_DIR, TRANSACTION_FLAG ).toString(), true );
186                 DataOutputStream out = new DataOutputStream( fo );
187                 out.writeLong( cid.value() );
188                 out.close();
189             } catch (IOException e) {
190                 throw new OzoneInternalException("Failed to register cluster", e);
191             }
192         }
193     }
194
195
196     /**
197      */

198     private Cluster newCluster() throws IOException {
199         // close the old cluster stream before creating a new one
200
Cluster oldCluster = null;
201         if (currentCluster != null) {
202             oldCluster = currentCluster;
203             currentCluster.close();
204         }
205
206         // retieve a new clusterid and create a cluster
207
currentCluster = new Cluster( env, classicStore, new ClusterID( env.keyGenerator.nextID() ) );
208
209         // check, if the last cluster has to be compressed;
210
// this can't be done in writeLeak() while the cluster is open
211
if (oldCluster != null && oldCluster.needsCompressing()) {
212             clustersToCompress.add( oldCluster.cluID() );
213         }
214
215         // save the current cluster
216
writeProperties();
217
218         return currentCluster;
219     }
220
221
222     /** */
223     protected Cluster readCluster( ClusterID cid, int whatToRead ) throws OzoneInternalException {
224         //env.logWriter.newEntry ("PersistenceSpace.readCluster: " + cid, LogWriter.DEBUG);
225
// opening the same file for writing _and_ reading causes trouble
226
Cluster cl = null;
227         try {
228             if (cid.equals( currentCluster.cluID() )) {
229                 currentCluster.close();
230             }
231
232             cl = new Cluster( env, classicStore, cid );
233             cl.readObjects( whatToRead, null );
234
235             // reopen, if necessary
236
if (cid.equals( currentCluster.cluID() )) {
237                 currentCluster.open();
238             }
239         } catch (Exception JavaDoc e) {
240             throw new OzoneInternalException("Failed to read cluster", e);
241         }
242
243         return cl;
244     }
245
246
247     /**
248      */

249     protected void compressCluster( ClusterID cid ) throws IOException {
250         //env.logWriter.newEntry ("PersistanceSpace.compressCluster: " + cid, LogWriter.DEBUG);
251
Cluster cl = new Cluster( env, classicStore, cid );
252         cl.readObjects( Cluster.DATA, null );
253
254         DeathObject dobj;
255         DxIterator it = cl.objects().iterator();
256         while ((dobj = (DeathObject)it.next()) != null) {
257             writeObject( dobj, false, false );
258         }
259     }
260
261
262     /**
263      */

264     protected ClusterID[] allClusters() {
265         File path = new File( env.getDatabaseDir() + Env.DATA_DIR );
266         String JavaDoc[] fileList = path.list( new FilenameFilter() {
267
268
269             public boolean accept( File dir, String JavaDoc name ) {
270                 return name.endsWith( Cluster.CLUSTER_FILE_SUFF );
271             }
272         } );
273         ClusterID[] result = new ClusterID[fileList.length];
274
275         for (int i = 0; i < fileList.length; i++) {
276             result[i] = new ClusterID( Long.parseLong( fileList[i].substring( 0,
277                     fileList[i].length() - Cluster.CLUSTER_FILE_SUFF.length() ) ) );
278         }
279
280         return result;
281     }
282
283
284     /**
285      */

286     protected ClusterID writeObject( DeathObject dobj, boolean serialize, boolean useClone ) throws IOException {
287         //env.logWriter.newEntry ("PersistenceSpace.writeObject: " + dobj.objID(), LogWriter.DEBUG);
288
// create new cluster if necessary
289
if (currentCluster.size() > Cluster.MAX_SIZE) {
290             newCluster();
291         }
292
293         // assign the current cluster to the current transaction
294
// we have to that _before_ writing the object, because if something
295
// goes wrong while registering the operation isn't performed
296
// and the database stays consistent
297
registerCluster( currentCluster.cluID() );
298
299         // at first set the object's clusterId and then write the object
300
dobj.container().setClusterID( currentCluster.cluID() );
301         currentCluster.appendObject( dobj, currentTransaction, serialize, useClone );
302         return currentCluster.cluID();
303     }
304
305
306     /**
307      */

308     protected void writeLeak( ClusterID cid, DeathObject dobj ) throws OzoneInternalException {
309         //env.logWriter.newEntry ("PersistenceSpace.writeLeak: " + cid + " : " + dobj.objID(), LogWriter.DEBUG);
310

311         // assign the touched cluster to the current transaction
312
// we have to that _before_ writeing the leak, because if something
313
// goes wrong while registering the operation isn't performed
314
// and the database stays consistent
315
registerCluster( cid );
316
317         // write the leak
318
Cluster cl = new Cluster( env, classicStore, cid );
319         try {
320             cl.writeLeak( dobj, currentTransaction );
321         } catch (IOException e) {
322             throw new OzoneInternalException("Failed to write leak", e);
323         }
324
325         // we must not compress the current cluster ! This is technical
326
// (we can't open the same file for read _and_ read at the same time)
327
// and logical (we can't append the objects of the cluster to the
328
// cluster itself) not possible. The current cluster will be
329
// compressed in newCluster()
330
if (currentCluster.cluID().equals( cid )) {
331             return;
332         }
333
334         // retrieve the cluster size simply of it's file size
335
// this is much faster than reading the whole cluster
336
long clSize = cl.fileHandle().length();
337         if (clSize > 0) {
338             //env.logWriter.newEntry ("LEAK_WEIGHT = " + cl.leakSize() + " / " + clSize, LogWriter.DEBUG);
339
if ((double)cl.leakSize() / clSize > Cluster.LEAK_WEIGHT) {
340                 clustersToCompress.add( cid );
341             }
342         }
343     }
344
345
346     /**
347      */

348     protected void fillObjectSpace() {
349         env.logWriter.newEntry( this, "ObjectSpace recovery ...", LogWriter.INFO );
350         int count = 0;
351         ClusterID[] clusters = allClusters();
352         for (int i = 0; i < clusters.length; i++) {
353             try {
354                 ObjectContainer os;
355                 Cluster cl = new Cluster( env, classicStore, clusters[i] );
356                 cl.readObjects( Cluster.STATE, null );
357                 DxIterator it = cl.objects().iterator();
358                 while ((os = (ObjectContainer)it.next()) != null) {
359                     ((ClassicStore)env.getStoreManager()).objectSpace.deleteObject( os );
360                     ((ClassicStore)env.getStoreManager()).objectSpace.addObject( os );
361                     count++;
362                 //env.logWriter.newEntry ("adding: " + os.id(), LogWriter.DEBUG);
363
}
364             } catch (Exception JavaDoc e) {
365                 env.fatalError( this, "fillObjectSpace: " + e.toString(), e );
366             }
367         }
368         env.logWriter.newEntry( this, count + " objects found.", LogWriter.INFO );
369     }
370
371
372     /**
373      * do some recover stuff besides transaction rollback
374      */

375     protected void recover() {
376     }
377
378
379     /**
380      */

381     protected void rollBackTransaction( File transFlag ) throws Exception JavaDoc {
382         TransactionID rollBackTid = null;
383         DxBag clusters = new DxArrayBag();
384         try {
385             // open the flag file
386
FileInputStream fi = new FileInputStream( transFlag );
387             DataInputStream in = new DataInputStream( fi );
388
389             in.readInt();
390             rollBackTid = new TransactionID( in.readLong() );
391             // recover the current cluster
392
currentCluster = new Cluster( env, classicStore, new ClusterID( in.readLong() ) );
393             // get all assigned clusters
394
while (fi.available() != 0) {
395                 clusters.add( new ClusterID( in.readLong() ) );
396             }
397
398             in.close();
399         } catch (IOException e) {
400             env.logWriter.newEntry( this, "rollback transaction: flag file corrupted", LogWriter.WARN );
401         }
402
403         //env.logWriter.newEntry ("rollback transaction: " + rollBackTid + " with " + clusters.count() + " clusters", LogWriter.DEBUG);
404

405         // rollback the clusters
406
ClusterID cid;
407         DxIterator it = clusters.iterator();
408         while ((cid = (ClusterID)it.next()) != null) {
409             Cluster cl = new Cluster( env, classicStore, cid );
410             //env.logWriter.newEntry ("rollback : " + cid, LogWriter.DEBUG);
411
cl.rollBack( rollBackTid );
412         }
413
414         // save the recovered properties
415
writeProperties();
416
417         transFlag.delete();
418         touchedClusters = null;
419         clustersToCompress = null;
420         currentTransaction = null;
421     }
422 }
// (end of file; scraped page footer removed)