1 /*
2
3    Derby - Class org.apache.derby.impl.store.raw.data.BaseDataFileFactory
4
5    Licensed to the Apache Software Foundation (ASF) under one or more
6    contributor license agreements. See the NOTICE file distributed with
7    this work for additional information regarding copyright ownership.
8    The ASF licenses this file to you under the Apache License, Version 2.0
9    (the "License"); you may not use this file except in compliance with
10    the License. You may obtain a copy of the License at
11
12       http://www.apache.org/licenses/LICENSE-2.0
13
14    Unless required by applicable law or agreed to in writing, software
15    distributed under the License is distributed on an "AS IS" BASIS,
16    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17    See the License for the specific language governing permissions and
18    limitations under the License.
19
20  */

21
22 package org.apache.derby.impl.store.raw.data;
23
24
25 import org.apache.derby.iapi.reference.SQLState;
26 import org.apache.derby.iapi.reference.MessageId;
27
28 import org.apache.derby.impl.store.raw.data.AllocationActions;
29 import org.apache.derby.impl.store.raw.data.BaseContainerHandle;
30 import org.apache.derby.impl.store.raw.data.BasePage;
31 import org.apache.derby.impl.store.raw.data.DirectActions;
32 import org.apache.derby.impl.store.raw.data.LoggableActions;
33 import org.apache.derby.impl.store.raw.data.PageActions;
34 import org.apache.derby.impl.store.raw.data.RecordId;
35 import org.apache.derby.impl.store.raw.data.ReclaimSpace;
36
37 import org.apache.derby.iapi.services.info.ProductVersionHolder;
38 import org.apache.derby.iapi.services.info.ProductGenusNames;
39
40 import org.apache.derby.iapi.services.cache.CacheFactory;
41 import org.apache.derby.iapi.services.cache.CacheManager;
42 import org.apache.derby.iapi.services.cache.Cacheable;
43 import org.apache.derby.iapi.services.cache.CacheableFactory;
44 import org.apache.derby.iapi.services.context.ContextManager;
45 import org.apache.derby.iapi.services.daemon.DaemonService;
46 import org.apache.derby.iapi.services.daemon.Serviceable;
47 import org.apache.derby.iapi.services.monitor.ModuleControl;
48 import org.apache.derby.iapi.services.monitor.ModuleSupportable;
49 import org.apache.derby.iapi.services.monitor.Monitor;
50 import org.apache.derby.iapi.services.monitor.PersistentService;
51 import org.apache.derby.iapi.services.diag.Performance;
52 import org.apache.derby.iapi.services.sanity.SanityManager;
53 import org.apache.derby.iapi.services.io.FormatIdUtil;
54 import org.apache.derby.iapi.services.stream.HeaderPrintWriter;
55
56 import org.apache.derby.iapi.error.StandardException;
57 import org.apache.derby.iapi.services.i18n.MessageService;
58 import org.apache.derby.iapi.store.access.AccessFactoryGlobals;
59 import org.apache.derby.iapi.store.access.FileResource;
60 import org.apache.derby.iapi.store.access.TransactionController;
61 import org.apache.derby.iapi.store.raw.data.DataFactory;
62 import org.apache.derby.iapi.store.raw.data.RawContainerHandle;
63 import org.apache.derby.iapi.store.raw.log.LogFactory;
64 import org.apache.derby.iapi.store.raw.log.LogInstant;
65 import org.apache.derby.iapi.store.raw.ContainerHandle;
66 import org.apache.derby.iapi.store.raw.ContainerKey;
67 import org.apache.derby.iapi.store.raw.LockingPolicy;
68 import org.apache.derby.iapi.store.raw.Page;
69 import org.apache.derby.iapi.store.raw.RawStoreFactory;
70 import org.apache.derby.iapi.store.raw.RecordHandle;
71 import org.apache.derby.iapi.store.raw.StreamContainerHandle;
72 import org.apache.derby.iapi.store.raw.Transaction;
73 import org.apache.derby.iapi.store.raw.xact.RawTransaction;
74
75 import org.apache.derby.iapi.store.access.RowSource;
76
77 import org.apache.derby.io.StorageFactory;
78 import org.apache.derby.io.WritableStorageFactory;
79 import org.apache.derby.io.StorageFile;
80 import org.apache.derby.io.StorageRandomAccessFile;
81 import org.apache.derby.iapi.services.uuid.UUIDFactory;
82 import org.apache.derby.catalog.UUID;
83 import org.apache.derby.iapi.reference.Attribute;
84 import org.apache.derby.iapi.reference.Property;
85 import org.apache.derby.iapi.reference.SQLState;
86 import org.apache.derby.iapi.util.ByteArray;
87 import org.apache.derby.iapi.services.io.FileUtil;
88 import org.apache.derby.iapi.util.CheapDateFormatter;
89 import org.apache.derby.iapi.util.ReuseFactory;
90 import org.apache.derby.iapi.services.property.PropertyUtil;
91
92 import java.util.Properties;
93 import java.util.Hashtable;
94 import java.util.Enumeration;
95
96 import java.io.File;
97 import java.io.FilePermission;
98 import java.io.OutputStream;
99 import java.io.IOException;
100
101 import java.security.AccessController;
102 import java.security.PrivilegedAction;
103 import java.security.PrivilegedExceptionAction;
104 import java.security.PrivilegedActionException;
105
106 /**
107
108 Provides the abstract class with most of the implementation of DataFactory and
109 ModuleControl shared by all the different filesystem implementations.
110 <p>
111 RESOLVE (mikem - 2/19/98) -
112 Currently only getContainerClass() is abstract, there are probably more
113 routines which should be abstract. Also the other implementations should
114 probably inherit from the abstract class, rather than from the DataFileFactory
115 class. Also there probably should be a generic directory and the rest of the
116 filesystem implementations parallel to it.
117 I wanted to limit the changes going into the branch and then fix
118 inheritance stuff in main.
119 <p>
120 The code in this class was moved over from DataFileFactory.java and then
121 that file was made to inherit from this one.
122
123 **/

124
125 public final class BaseDataFileFactory
126     implements DataFactory, CacheableFactory, ModuleControl, ModuleSupportable, PrivilegedExceptionAction
127 {
128
129     private String subSubProtocol;
130     StorageFactory storageFactory;
131
132     /* writableStorageFactory == (WritableStorageFactory) storageFactory if
133      * storageFactory also implements WritableStorageFactory, null if the
134      * storageFactory is read-only.
135      */

136     WritableStorageFactory writableStorageFactory;
137
138     private long nextContainerId = System.currentTimeMillis();
139     private boolean databaseEncrypted;
140
141     private CacheManager pageCache;
142     private CacheManager containerCache;
143
144     private LogFactory logFactory;
145
146     private ProductVersionHolder jbmsVersion;
147
148     private RawStoreFactory rawStoreFactory; // associated raw store factory
149

150     private String dataDirectory; // root directory of files.
151

152     private boolean throwDBlckException; // if true throw db.lck
153                                          // exception, even on systems
154                                          // where lock file is not
155                                          // guaranteed.
156

157     private UUID identifier; // unique id for locking
158

159     private Object freezeSemaphore;
160
161
162     // is the data store frozen - protected by freezeSemaphore
163     private boolean isFrozen;
164
165
166     // how many writers are currently active in the data store -
167     // protected by freezeSemaphore
168     private int writersInProgress;
169
170
171     private boolean removeStubsOK;
172     private boolean isCorrupt;
173
174     // the database is being created, no logging
175     private boolean inCreateNoLog;
176
177     // lock against other JBMS opening the same database
178     private StorageRandomAccessFile fileLockOnDB;
179     private StorageFile exFileLock; //file handle to get exclusive lock
180     private HeaderPrintWriter istream;
181     private static final String LINE =
182         "----------------------------------------------------------------";
183
184     // disable syncing of data during page allocation. DERBY-888 changes
185     // the system to not require data syncing at allocation.
186     boolean dataNotSyncedAtAllocation = true;
187
188     // disable syncing of data during checkpoint.
189     boolean dataNotSyncedAtCheckpoint = false;
190
191     // these fields can be accessed directly by subclasses if it needs a
192     // different set of actions
193     private PageActions loggablePageActions;
194     private AllocationActions loggableAllocActions;
195
196     private boolean readOnly; // is this a read only data store
197     private boolean supportsRandomAccess;
198     private FileResource fileHandler; // my file handler, set by a
199                                       // sub-class in its boot method.
200

201
202     //hash table to keep track of information about dropped containers stubs
203     private Hashtable droppedTableStubInfo;
204
205     private Hashtable postRecoveryRemovedFiles;
206
207     private EncryptData containerEncrypter;
208
209
210     // PrivilegedAction actions
211     private int actionCode;
212     private static final int GET_TEMP_DIRECTORY_ACTION = 1;
213     private static final int REMOVE_TEMP_DIRECTORY_ACTION = 2;
214     private static final int GET_CONTAINER_PATH_ACTION = 3;
215     private static final int GET_ALTERNATE_CONTAINER_PATH_ACTION = 4;
216     private static final int FIND_MAX_CONTAINER_ID_ACTION = 5;
217     private static final int DELETE_IF_EXISTS_ACTION = 6;
218     private static final int GET_PATH_ACTION = 7;
219     private static final int POST_RECOVERY_REMOVE_ACTION = 8;
220     private static final int REMOVE_STUBS_ACTION = 9;
221     private static final int BOOT_ACTION = 10;
222     private static final int GET_LOCK_ON_DB_ACTION = 11;
223     private static final int RELEASE_LOCK_ON_DB_ACTION = 12;
224     private static final int RESTORE_DATA_DIRECTORY_ACTION = 13;
225     private static final int GET_CONTAINER_NAMES_ACTION = 14;
226
227     private ContainerKey containerId;
228     private boolean stub;
229     private StorageFile actionFile;
230     private UUID myUUID;
231     private UUIDFactory uuidFactory;
232     private String databaseDirectory;
233
234     private String backupPath;
235     private File backupRoot;
236     private String[] bfilelist;
237
238     /*
239     ** Constructor
240     */

241
242     public BaseDataFileFactory()
243     {
244     }
245
246     /*
247     ** Methods of ModuleControl
248     */

249
250     public boolean canSupport(Properties startParams)
251     {
252
253         String serviceType = startParams.getProperty(PersistentService.TYPE);
254         if (serviceType == null)
255             return false;
256
257         if (!handleServiceType(serviceType))
258             return false;
259
260         if (startParams.getProperty(PersistentService.ROOT) == null)
261             return false;
262
263         return true;
264     }
265
266     public void boot(boolean create, Properties startParams)
267         throws StandardException
268     {
269
270         jbmsVersion = Monitor.getMonitor().getEngineVersion();
271
272         dataDirectory = startParams.getProperty(PersistentService.ROOT);
273
274         UUIDFactory uf = Monitor.getMonitor().getUUIDFactory();
275
276         identifier = uf.createUUID();
277
278         PersistentService ps = Monitor.getMonitor().getServiceType(this);
279
280         try
281         {
282             storageFactory =
283             ps.getStorageFactoryInstance(
284                 true,
285                 dataDirectory,
286                 startParams.getProperty(
287                     Property.STORAGE_TEMP_DIRECTORY,
288                     PropertyUtil.getSystemProperty(
289                         Property.STORAGE_TEMP_DIRECTORY)),
290                 identifier.toANSIidentifier());
291         }
292         catch(IOException ioe)
293         {
294             if (create)
295             {
296                 throw StandardException.newException(
297                     SQLState.SERVICE_DIRECTORY_CREATE_ERROR,
298                     ioe, dataDirectory);
299             }
300             else
301             {
302                 throw StandardException.newException(
303                     SQLState.DATABASE_NOT_FOUND, ioe, dataDirectory);
304             }
305         }
306
307         if (storageFactory instanceof WritableStorageFactory)
308             writableStorageFactory = (WritableStorageFactory) storageFactory;
309
310         actionCode = BOOT_ACTION;
311
312         try
313         {
314             AccessController.doPrivileged( this);
315         }
316         catch (PrivilegedActionException pae)
317         {
318             // BOOT_ACTION does not throw any exceptions.
319         }
320         
321         String value =
322             startParams.getProperty(Property.FORCE_DATABASE_LOCK,
323                 PropertyUtil.getSystemProperty(Property.FORCE_DATABASE_LOCK));
324         throwDBlckException =
325             Boolean.valueOf(
326                 (value != null ? value.trim() : value)).booleanValue();
327
328         if (!isReadOnly()) // read only db, not interested in filelock
329             getJBMSLockOnDB(identifier, uf, dataDirectory);
330
331
332         //If the database is being restored/created from backup
333         //then restore the data directory (seg*) from backup
334         String restoreFrom = null;
335         restoreFrom = startParams.getProperty(Attribute.CREATE_FROM);
336         if(restoreFrom == null)
337             restoreFrom = startParams.getProperty(Attribute.RESTORE_FROM);
338         if(restoreFrom == null)
339             restoreFrom = startParams.getProperty(Attribute.ROLL_FORWARD_RECOVERY_FROM);
340
341         if (restoreFrom !=null)
342         {
343             try
344             {
345                 // restoreFrom and createFrom operations also need to know if database
346                 // is encrypted
347                 String dataEncryption =
348                     startParams.getProperty(Attribute.DATA_ENCRYPTION);
349                 databaseEncrypted = Boolean.valueOf(dataEncryption).booleanValue();
350                 restoreDataDirectory(restoreFrom);
351             }
352             catch(StandardException se)
353             {
354                 releaseJBMSLockOnDB();
355                 throw se;
356             }
357         }
358
359         logMsg(LINE);
360         long bootTime = System.currentTimeMillis();
361         String readOnlyMsg = (isReadOnly())
362             ? MessageService.getTextMessage(MessageId.STORE_BOOT_READONLY_MSG)
363             : "";
364
365         logMsg(CheapDateFormatter.formatDate(bootTime) +
366                MessageService.getTextMessage(MessageId.STORE_BOOT_MSG,
367                                              jbmsVersion,
368                                              identifier,
369                                              dataDirectory,
370                                              readOnlyMsg));
371
372         uf = null;
373
374
375
376         CacheFactory cf = (CacheFactory)
377             Monitor.startSystemModule(
378                 org.apache.derby.iapi.reference.Module.CacheFactory);
379
380         int pageCacheSize = getIntParameter(
381                     RawStoreFactory.PAGE_CACHE_SIZE_PARAMETER,
382                     null,
383                     RawStoreFactory.PAGE_CACHE_SIZE_DEFAULT,
384                     RawStoreFactory.PAGE_CACHE_SIZE_MINIMUM,
385                     RawStoreFactory.PAGE_CACHE_SIZE_MAXIMUM);
386
387         pageCache =
388                 cf.newCacheManager(this,
389                     "PageCache",
390                     pageCacheSize / 2,
391                     pageCacheSize);
392
393         int fileCacheSize = getIntParameter(
394                     "derby.storage.fileCacheSize",
395                     null,
396                     100,
397                     2,
398                     100);
399
400         containerCache =
401             cf.newCacheManager(
402                 this, "ContainerCache", fileCacheSize / 2, fileCacheSize);
403
404         if (create)
405         {
406             String noLog =
407                 startParams.getProperty(Property.CREATE_WITH_NO_LOG);
408
409             inCreateNoLog =
410                 (noLog != null && Boolean.valueOf(noLog).booleanValue());
411
412         }
413
414         freezeSemaphore = new Object();
415
416         droppedTableStubInfo = new Hashtable();
417
418         // If derby.system.durability=test then set flags to disable sync of
419         // data pages at allocation when file is grown, disable sync of data
420         // writes during checkpoint
421         if (Property.DURABILITY_TESTMODE_NO_SYNC.equalsIgnoreCase(
422             PropertyUtil.getSystemProperty(Property.DURABILITY_PROPERTY)))
423         {
424             // - disable syncing of data during checkpoint.
425             dataNotSyncedAtCheckpoint = true;
426
427             // log message stating that derby.system.durability
428             // is set to a mode where syncs won't be forced, and the
429             // possible consequences of setting this mode
430             Monitor.logMessage(MessageService.getTextMessage(
431                 MessageId.STORE_DURABILITY_TESTMODE_NO_SYNC,
432                 Property.DURABILITY_PROPERTY,
433                 Property.DURABILITY_TESTMODE_NO_SYNC));
434         }
435         else if (Performance.MEASURE)
436         {
437             // development build only feature, must by hand set the
438             // Performance.MEASURE variable and rebuild. Useful during
439             // development to compare/contrast effect of syncing, release
440             // users can use the above relaxed durability option to disable
441             // all syncing.
442
443             // debug only flag - disable syncing of data during checkpoint.
444             dataNotSyncedAtCheckpoint =
445                 PropertyUtil.getSystemBoolean(
446                     Property.STORAGE_DATA_NOT_SYNCED_AT_CHECKPOINT);
447
448             if (dataNotSyncedAtCheckpoint)
449                 Monitor.logMessage(
450                     "Warning: " +
451                     Property.STORAGE_DATA_NOT_SYNCED_AT_CHECKPOINT +
452                     "set to true.");
453         }
454
455         fileHandler = new RFResource( this);
456     } // end of boot
457

458     public void stop()
459     {
460         boolean OK = false;
461
462         if (rawStoreFactory != null)
463         {
464             DaemonService rawStoreDaemon = rawStoreFactory.getDaemon();
465             if (rawStoreDaemon != null)
466                 rawStoreDaemon.stop();
467         }
468
469         long shutdownTime = System.currentTimeMillis();
470         logMsg("\n" + CheapDateFormatter.formatDate(shutdownTime) +
471                 MessageService.getTextMessage(
472                     MessageId.STORE_SHUTDOWN_MSG,
473                     getIdentifier()));
474         istream.println(LINE);
475
476         if (!isCorrupt)
477         {
478             try
479             {
480                 if (pageCache != null && containerCache != null)
481                 {
482                     pageCache.shutdown();
483                     containerCache.shutdown();
484
485                     OK = true;
486                 }
487
488             }
489             catch (StandardException se)
490             {
491                 se.printStackTrace(istream.getPrintWriter());
492             }
493         }
494
495         removeTempDirectory();
496
497         if (isReadOnly()) // do enough to close all files, then return
498         {
499             return;
500         }
501
502
503         // re-enable stub removal until a better method can be found.
504         // only remove stub if caches are cleaned
505         if (removeStubsOK && OK)
506             removeStubs();
507
508         releaseJBMSLockOnDB();
509     } // end of stop
510

511     /*
512     ** CacheableFactory
513     */

514     public Cacheable newCacheable(CacheManager cm)
515     {
516         if (cm == pageCache)
517         {
518             StoredPage sp = new StoredPage();
519             sp.setFactory(this);
520             return sp;
521         }
522
523         // container cache
524         return newContainerObject();
525     }
526
527     /**
528         Database creation finished
529
530         @exception StandardException Standard cloudscape exception policy.
531     */

532     public void createFinished() throws StandardException
533     {
534         if (!inCreateNoLog)
535         {
536             throw StandardException.newException(
537                 SQLState.FILE_DATABASE_NOT_IN_CREATE);
538         }
539
540         // the changes in cache are not logged, they have to be flushed to disk
541         checkpoint();
542         inCreateNoLog = false;
543     }
544
545     /*
546     ** Methods of DataFactory
547     */

548     
549     public ContainerHandle openContainer(
550     RawTransaction t,
551     ContainerKey containerId,
552     LockingPolicy locking,
553     int mode)
554         throws StandardException
555     {
556         return openContainer(
557                 t, containerId, locking, mode, false /* is not dropped */);
558     }
559
560
561     /**
562         @see DataFactory#openDroppedContainer
563         @exception StandardException Standard Cloudscape error policy
564     */

565     public RawContainerHandle openDroppedContainer(
566     RawTransaction t,
567     ContainerKey containerId,
568     LockingPolicy locking,
569     int mode)
570          throws StandardException
571     {
572         // since we are opening a possible dropped container
573         // let's not add any actions that will take place on a commit.
574         mode |= ContainerHandle.MODE_NO_ACTIONS_ON_COMMIT;
575
576         return openContainer(
577                 t, containerId, locking, mode, true /* droppedOK */);
578     }
579
580     /**
581         @see DataFactory#openContainer
582         @exception StandardException Standard Cloudscape error policy
583     */

584     private RawContainerHandle openContainer(
585     RawTransaction t,
586     ContainerKey identity,
587     LockingPolicy locking,
588     int mode,
589     boolean droppedOK)
590          throws StandardException
591     {
592
593         if (SanityManager.DEBUG)
594         {
595
596             if ((mode & (ContainerHandle.MODE_READONLY | ContainerHandle.MODE_FORUPDATE))
597                 == (ContainerHandle.MODE_READONLY | ContainerHandle.MODE_FORUPDATE))
598             {
599                 SanityManager.THROWASSERT("update and readonly mode specified");
600             }
601
602         }
603
604         boolean waitForLock = ((mode & ContainerHandle.MODE_LOCK_NOWAIT) == 0);
605
606
607         if ((mode & ContainerHandle.MODE_OPEN_FOR_LOCK_ONLY) != 0)
608         {
609             // Open a container for lock only, we don't care if it exists, is
610             // deleted or anything about it. The container handle we return is
611             // closed and cannot be used for fetch or update etc.
612             BaseContainerHandle lockOnlyHandle =
613                 new BaseContainerHandle(
614                     getIdentifier(), t, identity, locking, mode);
615
616             if (lockOnlyHandle.useContainer(true, waitForLock))
617                 return lockOnlyHandle;
618             else
619                 return null;
620         }
621
622
623         BaseContainerHandle c;
624
625         // see if the container exists
626         FileContainer container = (FileContainer) containerCache.find(identity);
627         if (container == null)
628             return null;
629         
630         if (identity.getSegmentId() == ContainerHandle.TEMPORARY_SEGMENT)
631         {
632
633             if (SanityManager.DEBUG)
634             {
635                 SanityManager.ASSERT(container instanceof TempRAFContainer);
636             }
637
638             if ((mode & ContainerHandle.MODE_TEMP_IS_KEPT) ==
639                     ContainerHandle.MODE_TEMP_IS_KEPT)
640             {
641                 // if the mode is kept, then, we do not want to truncate
642                 mode |= ContainerHandle.MODE_UNLOGGED;
643             }
644             else
645             {
646                 // this should be OK even if the table was opened read-only
647                 mode |=
648                     (ContainerHandle.MODE_UNLOGGED |
649                      ContainerHandle.MODE_TRUNCATE_ON_ROLLBACK);
650             }
651             
652             locking =
653                 t.newLockingPolicy(
654                     LockingPolicy.MODE_NONE,
655                     TransactionController.ISOLATION_NOLOCK, true);
656         }
657         else
658         {
659             // real tables
660             if (inCreateNoLog)
661             {
662                 mode |=
663                     (ContainerHandle.MODE_UNLOGGED |
664                      ContainerHandle.MODE_CREATE_UNLOGGED);
665             } else {
666
667                 // make sure everything is logged if logArchived is turned on;
668                 // clear all UNLOGGED flags
669                 if (logFactory.logArchived()) {
670                     mode &= ~(ContainerHandle.MODE_UNLOGGED |
671                               ContainerHandle.MODE_CREATE_UNLOGGED);
672
673                 } else {
674
675                     // block the online backup if the container is being
676                     // opened in unlogged mode, if the backup is already
677                     // running then convert all unlogged opens to logged ones,
678                     // otherwise online backup copy will be inconsistent.
679
680                     if (((mode & ContainerHandle.MODE_UNLOGGED) ==
681                          ContainerHandle.MODE_UNLOGGED) ||
682                         ((mode & ContainerHandle.MODE_CREATE_UNLOGGED) ==
683                          ContainerHandle.MODE_CREATE_UNLOGGED))
684                     {
685                         if (!t.blockBackup(false)) {
686                             // when a backup is in progress transaction can not
687                             // block the backup, so convert unlogged opens
688                             // to logged mode.
689                             mode &= ~(ContainerHandle.MODE_UNLOGGED |
690                                       ContainerHandle.MODE_CREATE_UNLOGGED);
691                         }
692                     }
693
694                 }
695
696             }
697
698             // if mode is UNLOGGED but not CREATE_UNLOGGED, then force the
699             // container from cache when the transaction commits. For
700             // CREATE_UNLOGGED, client has the responsibility of forcing the
701             // cache.
702             if (((mode & ContainerHandle.MODE_UNLOGGED) ==
703                     ContainerHandle.MODE_UNLOGGED) &&
704                 ((mode & ContainerHandle.MODE_CREATE_UNLOGGED) == 0))
705             {
706                 mode |= ContainerHandle.MODE_FLUSH_ON_COMMIT;
707             }
708         }
709
710         PageActions pageActions = null;
711         AllocationActions allocActions = null;
712
713         if ((mode & ContainerHandle.MODE_FORUPDATE) ==
714             ContainerHandle.MODE_FORUPDATE)
715         {
716
717             if ((mode & ContainerHandle.MODE_UNLOGGED) == 0)
718             {
719                 // get the current loggable actions
720                 pageActions = getLoggablePageActions();
721                 allocActions = getLoggableAllocationActions();
722                 
723             }
724             else
725             {
726                 // unlogged
727                 pageActions = new DirectActions();
728                 allocActions = new DirectAllocActions();
729             }
730         }
731
732         c = new BaseContainerHandle(
733                 getIdentifier(), t, pageActions,
734                 allocActions, locking, container, mode);
735
736         // see if we can use the container
737         try
738         {
739             if (!c.useContainer(droppedOK, waitForLock))
740             {
741                 containerCache.release(container);
742                 return null;
743             }
744         }
745         catch (StandardException se)
746         {
747             containerCache.release(container);
748             throw se;
749         }
750
751         return c;
752     }
753
754     /** Add a container with a specified page size to a segment.
755         @exception StandardException Standard Cloudscape error policy
756     */

757     public long addContainer(
758     RawTransaction t,
759     long segmentId,
760     long input_containerid,
761     int mode,
762     Properties tableProperties,
763     int temporaryFlag)
764         throws StandardException
765     {
766         if (SanityManager.DEBUG)
767         {
768             if ((mode & ContainerHandle.MODE_CREATE_UNLOGGED) != 0)
769                 SanityManager.ASSERT(
770                     (mode & ContainerHandle.MODE_UNLOGGED) != 0,
771                     "cannot have CREATE_UNLOGGED set but UNLOGGED not set");
772         }
773
774         // If client has provided a containerid then use it, else use the
775         // internally generated one from getNextId().
776         long containerId =
777             ((input_containerid != ContainerHandle.DEFAULT_ASSIGN_ID) ?
778                  input_containerid : getNextId());
779
780         ContainerKey identity = new ContainerKey(segmentId, containerId);
781
782         boolean tmpContainer = (segmentId == ContainerHandle.TEMPORARY_SEGMENT);
783
784         ContainerHandle ch = null;
785         LockingPolicy cl = null;
786
787         if (!tmpContainer)
788         {
789             // lock the container before we create it.
790

791             if (isReadOnly())
792             {
793                 throw StandardException.newException(
794                         SQLState.DATA_CONTAINER_READ_ONLY);
795             }
796
797             cl = t.newLockingPolicy(LockingPolicy.MODE_CONTAINER,
798                     TransactionController.ISOLATION_SERIALIZABLE, true);
799             
800             if (SanityManager.DEBUG)
801                 SanityManager.ASSERT(cl != null);
802
803             ch = t.openContainer(identity, cl,
804                    (ContainerHandle.MODE_FORUPDATE |
805                     ContainerHandle.MODE_OPEN_FOR_LOCK_ONLY));
806         }
807
808         FileContainer container =
809             (FileContainer) containerCache.create(identity, tableProperties);
810
811         // create the first alloc page and the first user page,
812         // if this fails for any reason the transaction
813         // will roll back and the container will be dropped (removed)
814         ContainerHandle containerHdl = null;
815         Page firstPage = null;
816
817         try
818         {
819             // if opening a temporary container with IS_KEPT flag set,
820             // make sure to open it with IS_KEPT too.
821             if (tmpContainer &&
822                 ((temporaryFlag & TransactionController.IS_KEPT) ==
823                      TransactionController.IS_KEPT))
824             {
825
826                 mode |= ContainerHandle.MODE_TEMP_IS_KEPT;
827             }
828
829             // open no-locking as we already have the container locked
830             containerHdl =
831                 t.openContainer(
832                     identity, null, (ContainerHandle.MODE_FORUPDATE | mode));
833
834             // we just added it, containerHdl should not be null
835             if (SanityManager.DEBUG)
836                 SanityManager.ASSERT(containerHdl != null);
837
838             if (!tmpContainer)
839             {
840                 // make it persistent (in concept if not in reality)
841                 RawContainerHandle rch = (RawContainerHandle)containerHdl;
842
843                 ContainerOperation lop =
844                     new ContainerOperation(rch, ContainerOperation.CREATE);
845
846                 // mark the container as pre-dirtied so that if a checkpoint
847                 // happens after the log record is sent to the log stream, the
848                 // cache cleaning will wait for this change.
849                 rch.preDirty(true);
850                 try
851                 {
852                     t.logAndDo(lop);
853
854                     // flush the log to reduce the window between where
855                     // the container is created & synced and the log record
856                     // for it makes it to disk. If we fail in this
857                     // window we will leave a stranded container file.
858                     flush(t.getLastLogInstant());
859                 }
860                 finally
861                 {
862                     // in case logAndDo fail, make sure the container is not
863                     // stuck in preDirty state.
864                     rch.preDirty(false);
865                 }
866             }
867
868             firstPage = containerHdl.addPage();
869
870         }
871         finally
872         {
873
874             if (firstPage != null)
875             {
876                 firstPage.unlatch();
877                 firstPage = null;
878             }
879             
880             containerCache.release(container);
881
882             if (containerHdl != null)
883             {
884                 containerHdl.close();
885                 containerHdl = null;
886             }
887
888             if (!tmpContainer)
889             {
890                 // this should do nothing, since we requested isolation 3
891                 // but we can't assume that, so call the policy correctly.
892
893                 cl.unlockContainer(t, ch);
894             }
895         }
896
897         return containerId;
898     }
899
900     /** Add and load a stream container
901         @exception StandardException Standard Cloudscape error policy
902     */

903     public long addAndLoadStreamContainer(
904     RawTransaction t,
905     long segmentId,
906     Properties tableProperties,
907     RowSource rowSource)
908         throws StandardException
909     {
910         long containerId = getNextId();
911
912         ContainerKey identity = new ContainerKey(segmentId, containerId);
913
914         // create and load the stream container
915         StreamFileContainer sContainer =
916             new StreamFileContainer(identity, this, tableProperties);
917         sContainer.load(rowSource);
918
919         return containerId;
920     }
921
922
923     /**
924         open an existing streamContainer
925
926         @see DataFactory#openStreamContainer
927         @exception StandardException Standard Cloudscape error policy
928     */

929     public StreamContainerHandle openStreamContainer(
930     RawTransaction t,
931     long segmentId,
932     long containerId,
933     boolean hold)
934          throws StandardException
935     {
936
937         ContainerKey identity = new ContainerKey(segmentId, containerId);
938
939         StreamFileContainerHandle c;
940
941         // open the container with the identity
942         StreamFileContainer container = new StreamFileContainer(identity, this);
943         container = container.open(false);
944         if (container == null)
945             return null;
946
947         c = new StreamFileContainerHandle(getIdentifier(), t, container, hold);
948
949         // see if we can use the container
950         if (c.useContainer())
951             return c;
952         else
953             return null;
954     }
955
956     /**
957         Drop a stream container.
958
959         <P><B>Synchronisation</B>
960         <P>
961         This call will remove the container.
962
963         @exception StandardException Standard Cloudscape error policy
964     */

965     public void dropStreamContainer(
966     RawTransaction t,
967     long segmentId,
968     long containerId)
969         throws StandardException
970     {
971
972         boolean tmpContainer = (segmentId == ContainerHandle.TEMPORARY_SEGMENT);
973
974         StreamContainerHandle containerHdl = null;
975
976         try
977         {
978             ContainerKey ckey = new ContainerKey(segmentId, containerId);
979
980             // close all open containers and 'onCommit' objects of the container
981             t.notifyObservers(ckey);
982
983             containerHdl = t.openStreamContainer(segmentId, containerId, false);
984             if (tmpContainer && (containerHdl != null))
985             {
986                 containerHdl.removeContainer();
987                 return;
988             }
989         }
990         finally
991         {
992             if (containerHdl != null)
993                 containerHdl.close();
994         }
995     }
996
997     /**
998         re-Create a container during redo recovery.
999
1000        called ONLY during recovery load tran.
1001
1002        @exception StandardException Standard Cloudscape Error policy
1003     */

1004    public void reCreateContainerForRedoRecovery(
1005    RawTransaction t,
1006    long segmentId,
1007    long containerId,
1008    ByteArray containerInfo)
1009         throws StandardException
1010    {
1011        if (SanityManager.DEBUG)
1012            SanityManager.ASSERT(segmentId != ContainerHandle.TEMPORARY_SEGMENT,
1013                "Cannot recreate temp container during load tran");
1014
1015        ContainerKey identity = new ContainerKey(segmentId, containerId);
1016
1017        // no need to lock container during load tran
1018        // no need to create any page for the container, they will be created
1019        // as their log records are encountered later in load tran
1020
1021        FileContainer container =
1022            (FileContainer)containerCache.create(identity, containerInfo);
1023
1024        containerCache.release(container);
1025    }
1026
1027    /**
1028        Drop a container.
1029
1030        <P><B>Synchronisation</B>
1031        <P>
1032        This call will mark the container as dropped and then obtain a CX lock
1033        (table level exclusive lock) on the container. Once a container has
1034        been marked as dropped it cannot be retrieved by an openContainer()
1035        call unless explicitly with droppedOK.
1036        <P>
1037        Once the exclusive lock has been obtained the container is removed
1038        and all its pages deallocated. The container will be fully removed
1039        at the commit time of the transaction.
1040
1041        @exception StandardException Standard Cloudscape error policy
1042    */

1043    public void dropContainer(
1044    RawTransaction t,
1045    ContainerKey ckey)
1046         throws StandardException
1047    {
1048        boolean tmpContainer =
1049            (ckey.getSegmentId() == ContainerHandle.TEMPORARY_SEGMENT);
1050
1051        LockingPolicy cl = null;
1052
1053        if (!tmpContainer)
1054        {
1055            if (isReadOnly())
1056            {
1057                throw StandardException.newException(
1058                        SQLState.DATA_CONTAINER_READ_ONLY);
1059            }
1060
1061            cl =
1062                t.newLockingPolicy(
1063                    LockingPolicy.MODE_CONTAINER,
1064                    TransactionController.ISOLATION_SERIALIZABLE, true);
1065        
1066            if (SanityManager.DEBUG)
1067                SanityManager.ASSERT(cl != null);
1068        }
1069
1070        // close all open containers and 'onCommit' objects of this container
1071        t.notifyObservers(ckey);
1072
1073        RawContainerHandle containerHdl = (RawContainerHandle)
1074            t.openContainer(ckey, cl, ContainerHandle.MODE_FORUPDATE);
1075
1076        // If container is already dropped or is no longer there, throw
1077        // containerVanished exception unless container is temporary, in that
1078        // case just return. Upper layer is supposed to prevent such from
1079        // happening thru some means other than the lock we are getting here.
1080        try
1081        {
1082            if (containerHdl == null ||
1083                containerHdl.getContainerStatus() != RawContainerHandle.NORMAL)
1084            {
1085                // If we are a temp container, don't worry about it.
1086                if (tmpContainer)
1087                {
1088                    if (containerHdl != null)
1089                        containerHdl.removeContainer((LogInstant)null);
1090                    return;
1091                }
1092                else
1093                {
1094                    throw StandardException.newException(
1095                            SQLState.DATA_CONTAINER_VANISHED, ckey);
1096                }
1097            }
1098
1099            // Container exist, is updatable and we got the lock.
1100            if (tmpContainer)
1101            {
1102                containerHdl.dropContainer((LogInstant)null, true);
1103                containerHdl.removeContainer((LogInstant)null);
1104            }
1105            else
1106            {
1107                ContainerOperation lop =
1108                    new ContainerOperation(
1109                            containerHdl, ContainerOperation.DROP);
1110
1111                // mark the container as pre-dirtied so that if a checkpoint
1112                // happens after the log record is sent to the log stream, the
1113                // cache cleaning will wait for this change.
1114                containerHdl.preDirty(true);
1115                try
1116                {
1117                    t.logAndDo(lop);
1118                }
1119                finally
1120                {
1121                    // in case logAndDo fail, make sure the container is not
1122                    // stuck in preDirty state.
1123                    containerHdl.preDirty(false);
1124                }
1125
1126
1127                // remember this as a post commit work item
1128                Serviceable p =
1129                    new ReclaimSpace(
1130                            ReclaimSpace.CONTAINER,
1131                            ckey,
1132                            this,
1133                            true /* service ASAP */);
1134
1135                if (SanityManager.DEBUG)
1136                {
1137                    if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace))
1138                    {
1139                        SanityManager.DEBUG(
1140                            DaemonService.DaemonTrace,
1141                            "Add post commit work " + p);
1142                    }
1143                }
1144
1145                t.addPostCommitWork(p);
1146            }
1147
1148        }
1149        finally
1150        {
1151            if (containerHdl != null)
1152                containerHdl.close();
1153        }
1154
1155
1156    }
1157
1158
1159    /**
1160     * Implement checkpoint operation, write/sync all pages in cache.
1161     * <p>
1162     * The derby write ahead log algorithm uses checkpoint of the data
1163     * cache to determine points of the log no longer required by
1164     * restart recovery.
1165     * <p>
1166     * This implementation uses the 2 cache interfaces to force all dirty
1167     * pages to disk:
1168     *
1169     * WRITE DIRTY PAGES TO OS:
1170     * In the first step all pages in the page cache
1171     * are written, but not synced (pagecache.cleanAll). The cachemanager
1172     * cleanAll() interface guarantees that every dirty page that exists
1173     * when this call is first made will have its clean() method called.
1174     * The data cache (CachedPage.clean()), will call writePage but not
1175     * sync the page.
1176     * By using the java write then sync, the checkpoint is
1177     * usually doing async I/O, allowing the OS to schedule multiple I/O's
1178     * to the file as efficiently as it can.
1179     * Note that it has been observed that checkpoints
1180     * can flood the I/O system because these writes are not synced, see
1181     * DERBY-799 - checkpoint should probably somehow restrict the rate
1182     * it sends out those I/O's - it was observed a simple sleep every
1183     * N writes fixed most of the problem.
1184     *
1185     * FORCE THOSE DIRTY WRITES TO DISK:
1186     * To force the I/O's to disk, the system calls each open dirty file
1187     * and uses the java interface to sync any outstanding dirty pages to
1188     * disk (containerCache.cleanAll()). The open container cache does
1189     * this work in RAFContainer.clean() by writing its header out and
1190     * syncing the file. (Note if any change is made to checkpoint to
1191     * sync the writes vs. syncing the file, one probably still needs to
1192     * write the container header out and sync it).
1193     *
1194     * @exception StandardException Standard exception policy.
1195     **/

1196    public void checkpoint() throws StandardException
1197    {
1198        pageCache.cleanAll();
1199        containerCache.cleanAll();
1200    }
1201
1202    public void idle() throws StandardException
1203    {
1204        pageCache.ageOut();
1205        containerCache.ageOut();
1206    }
1207
1208    public void setRawStoreFactory(
1209    RawStoreFactory rsf,
1210    boolean create,
1211    Properties startParams)
1212         throws StandardException
1213    {
1214
1215        rawStoreFactory = rsf;
1216
1217        /*
1218         * boot the log factory here because different implementation of the
1219         * data factory wants different types of log factory
1220         */

1221        bootLogFactory(create, startParams);
1222
1223    }
1224
1225
1226    /**
1227        Return my unique identifier
1228
1229        @see DataFactory#getIdentifier
1230    */

1231    public UUID getIdentifier()
1232    {
1233        return identifier;
1234    }
1235
1236    /*
1237    ** Called by post commit daemon, calling ReclaimSpace.performWork()
1238    */

1239    public int reclaimSpace(
1240    Serviceable work,
1241    ContextManager contextMgr)
1242         throws StandardException
1243    {
1244        if (work == null)
1245            return Serviceable.DONE;
1246
1247        Transaction tran =
1248            rawStoreFactory.findUserTransaction(
1249                contextMgr, AccessFactoryGlobals.SYS_TRANS_NAME);
1250
1251        if (SanityManager.DEBUG)
1252        {
1253            SanityManager.ASSERT(tran != null, "null transaction");
1254
1255            if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace))
1256                SanityManager.DEBUG(DaemonService.DaemonTrace,
1257                                    "Performing post commit work " + work);
1258        }
1259
1260        return ReclaimSpaceHelper.reclaimSpace(this, (RawTransaction)tran,
1261                                               (ReclaimSpace)work);
1262    }
1263
1264    /**
1265        Really this is just a convenience routine for callers that might not
1266        have access to a log factory.
1267    */

1268    public StandardException markCorrupt(StandardException originalError)
1269    {
1270        boolean firsttime = !isCorrupt;
1271
1272        isCorrupt = true;
1273        if (getLogFactory() != null)
1274            getLogFactory().markCorrupt(originalError);
1275
1276        // if firsttime markCorrupt is called, release the JBMS lock so user
1277
// can move the database if so desired.
1278
if (firsttime)
1279        {
1280            // get rid of everything from the cache without first cleaning them
1281            if (pageCache != null)
1282                pageCache.discard(null);
1283
1284            if (containerCache != null)
1285                containerCache.discard(null);
1286
1287            // don't read in any more pages
1288            pageCache = null;
1289            containerCache = null;
1290
1291            releaseJBMSLockOnDB();
1292        }
1293
1294        return originalError;
1295    }
1296
1297    public FileResource getFileHandler()
1298    {
1299        return fileHandler;
1300    }
1301
1302    public void removeStubsOK()
1303    {
1304        removeStubsOK = true;
1305    }
1306
1307    /*
1308    ** Implementation specific methods
1309    */

1310
1311    public int getIntParameter(
1312    String parameterName,
1313    Properties properties,
1314    int defaultValue,
1315    int minimumValue,
1316    int maximumValue)
1317    {
1318
1319        int newValue;
1320
1321        String parameter = null;
1322        
1323        if (properties != null)
1324            parameter = properties.getProperty(parameterName);
1325
1326        if (parameter == null)
1327            parameter = PropertyUtil.getSystemProperty(parameterName);
1328
1329        if (parameter != null)
1330        {
1331            try
1332            {
1333                newValue = Integer.parseInt(parameter);
1334
1335                if ((newValue >= minimumValue) && (newValue <= maximumValue))
1336                    return newValue;
1337            }
1338            catch (NumberFormatException nfe)
1339            {
1340                // just leave the size at the default.
1341            }
1342        }
1343
1344        return defaultValue;
1345    }
1346
1347    CacheManager getContainerCache()
1348    {
1349        return containerCache;
1350    }
1351
1352    CacheManager getPageCache()
1353    {
1354        return pageCache;
1355    }
1356
1357    public long[] getCacheStats(String cacheName)
1358    {
1359
1360        if (cacheName == null)
1361        {
1362            // cache name is not specified, return the default.
1363            return getPageCache().getCacheStats();
1364        }
1365
1366        if (cacheName.equals("pageCache"))
1367        {
1368            return getPageCache().getCacheStats();
1369        }
1370        else
1371        {
1372            // return default set of cache.
1373            return getPageCache().getCacheStats();
1374        }
1375    }
1376
1377    public void resetCacheStats(String cacheName)
1378    {
1379        if (cacheName == null)
1380        {
1381            // cache name is not specified, return the default.
1382            getPageCache().resetCacheStats();
1383            return;
1384        }
1385
1386        if (cacheName.equals("pageCache"))
1387        {
1388            getPageCache().resetCacheStats();
1389        }
1390        else
1391        {
1392            // default
1393            getPageCache().resetCacheStats();
1394        }
1395    }
1396
1397    /**
1398        Ask the log factory to flush up to this log instant.
1399
1400        @exception StandardException cannot sync log file
1401    */

1402    void flush(LogInstant instant)
1403         throws StandardException
1404    {
1405        getLogFactory().flush(instant);
1406    }
1407
1408    /**
1409        Ask the log factory to flush the side log up to this bip location
1410        Not implemented in this class - subclass who deals with side log must
1411        override this.
1412
1413        @exception StandardException Cloudscape Standard Error Policy
1414    */

1415    private void syncSideLog(long bipLocation)
1416         throws StandardException
1417    {
1418        return;
1419    }
1420
1421
1422    LogFactory getLogFactory()
1423    {
1424        return logFactory;
1425    }
1426
1427
1428    RawStoreFactory getRawStoreFactory()
1429    {
1430        return rawStoreFactory;
1431    }
1432
1433    /**
1434        Get the root directory of the data storage area. Is always guaranteed
1435        to be an absolute path.
1436    */

1437    public String getRootDirectory()
1438    {
1439        return dataDirectory;
1440    }
1441
1442    /**
1443     * Return the Class of the Containers to be produced by this factory.
1444     * <p>
1445     * Concrete implementations of a DataFactory must implement this routine
1446     * to indicate what kind of containers are produced. For instance
1447     * the DataFileFactory produce RAFContainer's.
1448     * <p>
1449     * It is expected that this class is called only once, and thus does
1450     * not worry about the overhead of repeated Class.forName() lookups.
1451     *
1452     * @return The Class object for the Container class.
1453     *
1454     **/

1455    Cacheable newContainerObject()
1456    {
1457        if( supportsRandomAccess)
1458            return new RAFContainer(this);
1459        else
1460            return new InputStreamContainer( this);
1461    }
1462
1463    /**
1464     * This page is going from clean to dirty, this is a chance for the
1465     * sub class to do something if so desired
1466     *
1467     * @exception StandardException Standard Cloudscape Error Policy
1468     */

1469    private void pageToDirty(RawTransaction t, StoredPage page)
1470         throws StandardException
1471    {
1472        return; // this implementation does nothing
1473    }
1474
1475    /*
1476     * Get the loggable page action that is associated with this implementation
1477     *
1478     * @return the PageActions
1479     * @exception StandardExceptions Standard Cloudscape Error Policy
1480     */

1481    private PageActions getLoggablePageActions() throws StandardException
1482    {
1483        if (loggablePageActions == null)
1484            loggablePageActions = new LoggableActions();
1485        return loggablePageActions;
1486    }
1487
1488    /**
1489     * Get the loggable allocation action associated with this implementation
1490     *
1491     * @return the PageActions
1492     */

1493    private AllocationActions getLoggableAllocationActions()
1494    {
1495        if (loggableAllocActions == null)
1496            loggableAllocActions = new LoggableAllocActions();
1497        return loggableAllocActions;
1498    }
1499
1500    synchronized StorageFile getTempDirectory()
1501    {
1502        actionCode = GET_TEMP_DIRECTORY_ACTION;
1503        try
1504        {
1505            return (StorageFile) AccessController.doPrivileged( this);
1506        }
1507        catch (PrivilegedActionException pae)
1508        {
1509            // getTempDirectory does not actually throw an exception
1510            return null;
1511        }
1512    }
1513    
1514    private synchronized void removeTempDirectory()
1515    {
1516        if( storageFactory != null)
1517        {
1518            actionCode = REMOVE_TEMP_DIRECTORY_ACTION;
1519            try
1520            {
1521                AccessController.doPrivileged( this);
1522            }
1523            catch (PrivilegedActionException pae)
1524            {
1525                // removeTempDirectory does not throw an exception
1526            }
1527        }
1528    }
1529
1530    /**
1531     * Return the path to a container file.
1532     * <p>
1533     * Return the path to a container file that is relative to the root
1534     * directory.
1535     * <p>
1536     * The format of the name of an existing container file is:
1537     * segNNN/cXXX.dat
1538     * The format of the name of a stub describing a dropped container file is:
1539     * segNNN/dXXX.dat
1540     *
1541     * NNN = segment number, currently 0 is where normal db files are found.
1542     * XXX = The hex representation of the container number
1543     *
1544     * The store will always create containers with this format name, but
1545     * the store will also recognize the following two formats when attempting
1546     * to open files - as some copy tools have uppercased our filenames when
1547     * moving across operating systems:
1548     *
1549     * The format of the name of an existing container file is:
1550     * segNNN/CXXX.DAT
1551     * The format of the name of a stub describing a dropped container file is:
1552     * segNNN/DXXX.DAT
1553     * <p>
1554     *
1555     *
1556     * @param containerId The container being opened/created
1557     * @param stub True if the file name for the stub is requested,
1558     * otherwise the file name for the data file
1559     *
1560     * @return The StorageFile representing path to container relative to root.
1561     *
1562     **/

1563    public StorageFile getContainerPath(
1564    ContainerKey containerId,
1565    boolean stub)
1566    {
1567        return getContainerPath(containerId, stub, GET_CONTAINER_PATH_ACTION);
1568    }
1569
1570    private synchronized StorageFile getContainerPath(
1571    ContainerKey containerId,
1572    boolean stub,
1573    int code)
1574    {
1575        actionCode = code;
1576        try
1577        {
1578            this.containerId = containerId;
1579            this.stub = stub;
1580            try
1581            {
1582                return (StorageFile) AccessController.doPrivileged( this);
1583            }
1584            catch (PrivilegedActionException pae)
1585            {
1586                // getContainerPath does not throw an exception
1587                return null;
1588            }
1589        }
1590        finally
1591        {
1592            this.containerId = null;
1593        }
1594    }
1595
1596
1597    /**
1598        Return an alternate path to container file relative to the root directory.
1599        The alternate path uses upper case 'C','D', and 'DAT' instead of
1600        lower case - there have been cases of people copying the database and
1601        somehow upper casing all the file names.
1602
1603        The intended use is as a bug fix for track 3444.
1604
1605        @param containerId The container being opened/created
1606        @param stub True if the file name for the stub is requested, otherwise the file name for the data file
1607
1608    */

1609    public StorageFile getAlternateContainerPath(
1610    ContainerKey containerId,
1611    boolean stub)
1612    {
1613        return getContainerPath(
1614                    containerId, stub, GET_ALTERNATE_CONTAINER_PATH_ACTION);
1615    }
1616
1617
1618
1619    /**
1620        Remove stubs in this database. Stubs are committed deleted containers
1621    */

1622    private synchronized void removeStubs()
1623    {
1624        actionCode = REMOVE_STUBS_ACTION;
1625        try
1626        {
1627            AccessController.doPrivileged( this);
1628        }
1629        catch (PrivilegedActionException JavaDoc pae)
1630        {
1631            // removeStubs does not throw an exception
1632        }
1633    }
1634
1635    /**
1636     * Keeps track of information about the stub files of the committed deleted
1637     * containers. We use the info to delete them at checkpoints.
1638     * In addition to the file info, we also keep track of the identity of the
1639     * container, which helps us remove the entry from the container cache, and the
1640     * log instant when the stub was created, which helps us figure out whether
1641     * we require the stub file for crash recovery.
1642     * We maintain the information in a hashtable:
1643     * key(LOG INSTANT) Values: File handle, and ContainerIdentity.
1644     **/

1645    public void stubFileToRemoveAfterCheckPoint(
1646    StorageFile file,
1647    LogInstant logInstant,
1648    Object JavaDoc identity)
1649    {
1650        if(droppedTableStubInfo != null)
1651        {
1652            Object JavaDoc[] removeInfo = new Object JavaDoc[2];
1653            removeInfo[0] = file;
1654            removeInfo[1] = identity;
1655            droppedTableStubInfo.put(logInstant, removeInfo);
1656        }
1657    }
1658
1659    /**
1660     * Delete the stub files that are not required for recovery. A stub file
1661     * is not required to be around if the recovery is not going to see
1662     * any log record that belongs to that container. Since the stub files
1663     * are created as a post commit operation, they are not necessary during
1664     * undo operation of the recovery.
1665     *
1666     * To remove a stub file we have to be sure that it was created before the
1667     * redoLWM in the check point record. We can be sure that the stub is not
1668     * required if the log instant when it was created is less than the redoLWM.
1669     */

1670    public void removeDroppedContainerFileStubs(
1671    LogInstant redoLWM)
1672        throws StandardException
1673    {
1674    
1675        if (droppedTableStubInfo != null)
1676        {
1677            synchronized(droppedTableStubInfo)
1678            {
1679                for (Enumeration JavaDoc e = droppedTableStubInfo.keys();
1680                     e.hasMoreElements(); )
1681                {
1682                    LogInstant logInstant = (LogInstant) e.nextElement();
1683                    if(logInstant.lessThan(redoLWM))
1684                    {
1685                        
1686                        Object[] removeInfo =
1687                            (Object[]) droppedTableStubInfo.get(logInstant);
1688                        Object identity = removeInfo[1];
1689                        //delete the entry in the container cache.
1690                        Cacheable ccentry = containerCache.findCached(identity);
1691                        if(ccentry!=null)
1692                            containerCache.remove(ccentry);
1693
1694                        //delete the stub; we don't require it during recovery
1695                        synchronized( this)
1696                        {
1697                            actionFile = (StorageFile)removeInfo[0];
1698                            actionCode = DELETE_IF_EXISTS_ACTION;
1699                            try
1700                            {
1701                                if (AccessController.doPrivileged(this) != null)
1702                                {
1703                                    //if we successfully delete the file, remove
1704                                    //it from the hash table.
1705                                    droppedTableStubInfo.remove(logInstant);
1706                                }
1707                            }
1708                            catch (PrivilegedActionException JavaDoc pae)
1709                            {
1710                                // DELETE_IF_EXISTS does not throw an exception
1711                            }
1712                        }
1713                    }
1714                }
1715            }
1716        }
1717    }
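    /*
     * Illustrative sketch (not part of the original Derby source): the rule
     * applied above, shown in isolation. A stub created at 'stubInstant' is
     * safe to delete only when it is older than the redo low-water mark,
     * because redo will never see a log record for that container.
     * LogInstant.lessThan() is the comparison the method above relies on;
     * the helper name is an assumption for the example.
     */
    private static boolean sketchStubIsObsolete(
    LogInstant stubInstant,
    LogInstant redoLWM)
    {
        // older than the redo starting point => not needed for crash recovery
        return stubInstant.lessThan(redoLWM);
    }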
1718
1719
1720
1721
1722
1723
1724    /**
1725     * Find the largest containerid in seg 0.
1726     * <p>
1727     * Do a file list of the files in seg0 and return the highest numbered
1728     * file found.
1729     * <p>
1730     * Until I figure out some reliable place to store this information across
1731     * a boot of the system, this is what is used following a boot to assign
1732     * the next conglomerate id when a new conglomerate is created. It is
1733     * only called at most once, and then the value is cached by calling store
1734     * code.
1735     * <p>
1736     *
1737     * @return The largest containerid in seg0.
1738     **/

1739    private synchronized long findMaxContainerId()
1740    {
1741        actionCode = FIND_MAX_CONTAINER_ID_ACTION;
1742        try
1743        {
1744            return ((Long JavaDoc) AccessController.doPrivileged( this)).longValue();
1745        }
1746        catch (PrivilegedActionException JavaDoc pae)
1747        {
1748            // findMaxContainerId does not throw an exception
1749            return 0;
1750        }
1751    }
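    /*
     * Illustrative sketch (not part of the original Derby source): the file
     * name parse used by the FIND_MAX_CONTAINER_ID_ACTION case in run() below.
     * It strips the leading 'c'/'d' and the trailing ".dat" and reads the rest
     * as a hex container number; "c1a.dat" yields 26, for example. The helper
     * name is an assumption for the example.
     */
    private static long sketchParseContainerNumber(String fileName)
        throws NumberFormatException
    {
        // drop the first character and the 4-character ".dat" suffix
        return Long.parseLong(
            fileName.substring(1, fileName.length() - 4), 16);
    }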
1752
1753    private void bootLogFactory(
1754    boolean create,
1755    Properties JavaDoc startParams)
1756        throws StandardException
1757    {
1758
1759        if (isReadOnly())
1760        {
1761            startParams.put(
1762                LogFactory.RUNTIME_ATTRIBUTES, LogFactory.RT_READONLY);
1763        }
1764
1765        logFactory = (LogFactory)
1766            Monitor.bootServiceModule(
1767                create, this,
1768                rawStoreFactory.getLogFactoryModule(), startParams);
1769    }
1770
1771
1772    /**
1773        Does this factory support this service type.
1774    */

1775    private boolean handleServiceType(
1776    String JavaDoc type)
1777    {
1778        try
1779        {
1780            PersistentService ps =
1781                Monitor.getMonitor().getServiceProvider(type);
1782            return ps != null && ps.hasStorageFactory();
1783        }
1784        catch (StandardException se)
1785        {
1786            return false;
1787        }
1788    }
1789
1790    /**
1791        check to see if we are the only JBMS opened against this database.
1792
1793        <BR>This method does nothing if this database is read only or we cannot
1794        access files directly on the database directory.
1795
1796        <BR>We first see if a file named db.lck exists on the top database
1797        directory (i.e., the directory where service.properties lives). If it
1798        doesn't exist, we create it and write to it our identity which is
1799        generated per boot of the JBMS.
1800
1801        <BR>If the db.lck file already exists when we boot this database, we
1802        try to delete it first, assuming that an opened RandomAccessFile can
1803        act as a file lock against delete. If that succeeds, we may hold a
1804        file lock against subsequent JBMS that tries to attach to this
1805        database before we exit.
1806
1807        <BR>We test to see if we think an opened file will prevent it from
1808        being deleted, if so, we will hold on to the open file descriptor and
1809        use it as a filelock. If not, and we started out deleting an existing
1810        db.lck file, we issue a warning message to the info stream that we are
1811        about to attach to a database which may already have another JBMS
1812        attached to it. Then we overwrite that db.lck file with our identity.
1813
1814        <BR>Upon shutdown, we delete the db.lck file. If the system crashes
1815        instead of shutting down cleanly, it will be cleaned up the next time the
1816        system boots.
1817
1818        @exception StandardException another JBMS is already attached to the
1819        database at this directory
1820    */

1821    private void getJBMSLockOnDB(
1822    UUID myUUID,
1823    UUIDFactory uuidFactory,
1824    String JavaDoc databaseDirectory)
1825         throws StandardException
1826    {
1827        if (fileLockOnDB != null) // I already got the lock!
1828            return;
1829
1830        if (isReadOnly())
1831            return;
1832        if (SanityManager.DEBUG)
1833        {
1834            if (myUUID == null)
1835                SanityManager.THROWASSERT("myUUID == null");
1836        }
1837
1838        synchronized( this)
1839        {
1840            actionCode = GET_LOCK_ON_DB_ACTION;
1841            this.myUUID = myUUID;
1842            this.uuidFactory = uuidFactory;
1843            this.databaseDirectory = databaseDirectory;
1844            
1845            try
1846            {
1847                AccessController.doPrivileged( this);
1848            }
1849            catch (PrivilegedActionException JavaDoc pae)
1850            {
1851                throw (StandardException) pae.getException();
1852            }
1853            finally
1854            {
1855                this.myUUID = null;
1856                this.uuidFactory = null;
1857                this.databaseDirectory = null;
1858            }
1859        }
1860
1861        // OK file lock is reliable, we think... keep the fileLockOnDB file
1862        // descriptor open to prevent other JBMS from booting
1863        // fileLockOnDB is not null in this case
1864    }
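    /*
     * Illustrative sketch (not part of the original Derby source): the core of
     * the db.lck protocol described above, shown with plain java.io streams
     * instead of Derby's StorageRandomAccessFile. Write our boot UUID, then
     * read it back; if another JBMS raced us and overwrote the file, the
     * re-read value will not match and we must refuse to boot. The helper name
     * is an assumption for the example.
     */
    private static boolean sketchClaimLockFile(java.io.File lockFile, String bootUUID)
        throws java.io.IOException
    {
        java.io.RandomAccessFile raf =
            new java.io.RandomAccessFile(lockFile, "rw");
        try
        {
            raf.writeUTF(bootUUID);            // stamp our identity
            raf.seek(0);
            String readBack = raf.readUTF();
            return bootUUID.equals(readBack);  // false => someone else won the race
        }
        finally
        {
            raf.close();
        }
    }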
1865
1866    // Called from within a privilege block
1867    private void privGetJBMSLockOnDB() throws StandardException
1868    {
1869        boolean fileLockExisted = false;
1870        String JavaDoc blownUUID = null;
1871
1872        StorageFile fileLock = storageFactory.newStorageFile( DB_LOCKFILE_NAME);
1873
1874        try
1875        {
1876            // assume we are not read only
1877            // SECURITY PERMISSION MP1
1878            if (fileLock.exists())
1879            {
1880                fileLockExisted = true;
1881
1882                // see what it says in case we cannot count on delete failing
1883                // when someone else has an opened file descriptor.
1884                // I may be blowing this JBMS's lock away
1885                // SECURITY PERMISSION MP1
1886                // SECURITY PERMISSION OP4
1887                fileLockOnDB = fileLock.getRandomAccessFile( "rw");
1888                try
1889                {
1890                    blownUUID = fileLockOnDB.readUTF();
1891                }
1892                catch (IOException JavaDoc ioe)
1893                {
1894                    // The previous owner of the lock may have died before
1895                    // finishing writing its UUID down.
1896                    fileLockExisted = false;
1897                }
1898
1899                fileLockOnDB.close();
1900                fileLockOnDB = null;
1901
1902                // SECURITY PERMISSION OP5
1903                if (!fileLock.delete())
1904                {
1905                    throw StandardException.newException(
1906                        SQLState.DATA_MULTIPLE_JBMS_ON_DB,
1907                        databaseDirectory);
1908                }
1909            }
1910
1911            // if the file does not exist, we grab it immediately - there is a
1912            // possibility that some other JBMS got to it sooner than we did,
1913            // so check the UUID after we write it to make sure
1914            // SECURITY PERMISSION MP1
1915            // SECURITY PERMISSION OP5
1916            fileLockOnDB = fileLock.getRandomAccessFile( "rw");
1917
1918            // write it out for future reference
1919            fileLockOnDB.writeUTF(myUUID.toString());
1920
1921            fileLockOnDB.sync( false);
1922            fileLockOnDB.seek(0);
1923            // check the UUID
1924            UUID checkUUID = uuidFactory.recreateUUID(fileLockOnDB.readUTF());
1925            if (!checkUUID.equals(myUUID))
1926            {
1927                throw StandardException.newException(
1928                    SQLState.DATA_MULTIPLE_JBMS_ON_DB, databaseDirectory);
1929            }
1930        }
1931        catch (IOException JavaDoc ioe)
1932        {
1933            // probably a read only db, don't do anything more
1934            readOnly = true;
1935            try
1936            {
1937                if (fileLockOnDB != null)
1938                    fileLockOnDB.close();
1939            }
1940            catch (IOException JavaDoc ioe2)
1941            { /* did the best I could */ }
1942            fileLockOnDB = null;
1943
1944            return;
1945        }
1946
1947        if (fileLock.delete())
1948        {
1949            // if I can delete it while I am holding an opened file descriptor,
1950            // then the file lock is unreliable - send out a warning if I
1951            // have blown off another JBMS's lock on the DB
1952
1953            Object JavaDoc[] args = new Object JavaDoc[3];
1954            args[0] = myUUID;
1955            args[1] = databaseDirectory;
1956            args[2] = blownUUID;
1957
1958            //Try the exclusive file lock approach available in jdk1.4 or
1959            //above jvms, where the delete mechanism does not reliably prevent
1960            //double booting of derby databases. If we don't get a reliable
1961            //exclusive lock, we still send out a warning.
1962
1963            int exLockStatus = StorageFile.NO_FILE_LOCK_SUPPORT ;
1964            //If the user has chosen the force lock option, don't bother
1965            //applying the exclusive file lock mechanism
1966            if(!throwDBlckException)
1967            {
1968                exFileLock =
1969                    storageFactory.newStorageFile( DB_EX_LOCKFILE_NAME);
1970                exLockStatus = exFileLock.getExclusiveFileLock();
1971            }
1972
1973            if (exLockStatus == StorageFile.NO_FILE_LOCK_SUPPORT)
1974            {
1975                if (fileLockExisted && !throwDBlckException)
1976                {
1977
1978                    StandardException multipleJBMSWarning =
1979                      StandardException.newException(
1980                          SQLState.DATA_MULTIPLE_JBMS_WARNING, args);
1981
1982                    String JavaDoc warningMsg =
1983                      MessageService.getCompleteMessage(
1984                          SQLState.DATA_MULTIPLE_JBMS_WARNING, args);
1985
1986                    logMsg(warningMsg);
1987
1988                    // RESOLVE - need warning support. Output to
1989                    // System.err.println rather than just sending the warning
1990                    // message to derby.log.
1991                    System.err.println(warningMsg);
1992
1993                }
1994            }
1995
1996            // filelock is unreliable, but we should at least leave a file
1997            // there to warn the next person
1998            try
1999            {
2000                // the existing fileLockOnDB file descriptor may already be
2001                // deleted by the delete call, close it and create the file
2002                // again
2003                if(fileLockOnDB != null)
2004                    fileLockOnDB.close();
2005                fileLockOnDB = fileLock.getRandomAccessFile( "rw");
2006
2007                // write it out for future reference
2008                fileLockOnDB.writeUTF(myUUID.toString());
2009
2010                fileLockOnDB.sync( false);
2011                fileLockOnDB.close();
2012            }
2013            catch (IOException JavaDoc ioe)
2014            {
2015                try
2016                {
2017                    fileLockOnDB.close();
2018                }
2019                catch (IOException JavaDoc ioe2)
2020                {
2021                    /* did the best I could */
2022                }
2023            }
2024            finally
2025            {
2026                fileLockOnDB = null;
2027            }
2028
2029            if (fileLockExisted && throwDBlckException)
2030            {
2031                // user has chosen that we always throw an exception, throw it
2032                // now that we have reinstated the lock file.
2033                throw StandardException.newException(
2034                    SQLState.DATA_MULTIPLE_JBMS_FORCE_LOCK, args);
2035            }
2036        
2037            if(exLockStatus == StorageFile.EXCLUSIVE_FILE_LOCK_NOT_AVAILABLE)
2038            {
2039                
2040                throw StandardException.newException(
2041                    SQLState.DATA_MULTIPLE_JBMS_ON_DB,
2042                    databaseDirectory);
2043            }
2044
2045        }
2046    } // end of privGetJBMSLockOnDB
2047

2048    private void releaseJBMSLockOnDB()
2049    {
2050        if (isReadOnly())
2051            return;
2052
2053        synchronized( this)
2054        {
2055            actionCode = RELEASE_LOCK_ON_DB_ACTION;
2056            try
2057            {
2058                AccessController.doPrivileged( this);
2059            }
2060            catch (PrivilegedActionException JavaDoc pae)
2061            {
2062                // do nothing - it may be a read only medium, who knows what the
2063                // problem is
2064            }
2065            finally
2066            {
2067                fileLockOnDB = null;
2068            }
2069        }
2070    }
2071
2072    private void privReleaseJBMSLockOnDB() throws IOException JavaDoc
2073    {
2074        if (fileLockOnDB != null)
2075            fileLockOnDB.close();
2076
2077        if (storageFactory != null)
2078        {
2079            StorageFile fileLock =
2080                storageFactory.newStorageFile(DB_LOCKFILE_NAME);
2081
2082            fileLock.delete();
2083        }
2084
2085        //release the lock that is acquired using tryLock() to prevent
2086        //multiple jvms booting the same database on Unix environments.
2087        if(exFileLock != null)
2088            exFileLock.releaseExclusiveFileLock();
2089
2090        return;
2091    } // end of privReleaseJBMSLockOnDB
2092

2093    private void logMsg(String JavaDoc msg)
2094    {
2095        if (istream == null)
2096        {
2097            istream = Monitor.getStream();
2098        }
2099
2100        istream.println(msg);
2101    }
2102
2103    public final boolean databaseEncrypted()
2104    {
2105        return databaseEncrypted;
2106    }
2107
2108    public void setDatabaseEncrypted()
2109    {
2110        databaseEncrypted = true;
2111    }
2112
2113    public int encrypt(
2114    byte[] cleartext,
2115    int offset,
2116    int length,
2117    byte[] ciphertext,
2118    int outputOffset,
2119    boolean newEngine)
2120         throws StandardException
2121    {
2122        return rawStoreFactory.encrypt(
2123                    cleartext, offset, length,
2124                    ciphertext, outputOffset,
2125                    newEngine);
2126    }
2127
2128    public int decrypt(
2129    byte[] ciphertext,
2130    int offset,
2131    int length,
2132    byte[] cleartext,
2133    int outputOffset)
2134         throws StandardException
2135    {
2136        return rawStoreFactory.decrypt(
2137                ciphertext, offset, length, cleartext, outputOffset);
2138    }
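    /*
     * Illustrative sketch (not part of the original Derby source): how a
     * caller would round-trip a buffer through the two methods above. The
     * buffer is assumed to already be padded to the encryption block size;
     * the real page code takes care of that. The helper name is an assumption
     * for the example.
     */
    private void sketchEncryptRoundTrip(byte[] pageData)
        throws StandardException
    {
        byte[] encrypted = new byte[pageData.length];
        byte[] recovered = new byte[pageData.length];

        // encrypt the whole buffer into 'encrypted', then decrypt it back
        encrypt(pageData, 0, pageData.length, encrypted, 0, false);
        decrypt(encrypted, 0, encrypted.length, recovered, 0);
    }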
2139
2140
2141    
2142    public void encryptAllContainers(RawTransaction t) throws StandardException
2143    {
2144        containerEncrypter = new EncryptData(this);
2145        // encrypt all the containers in the database
2146        containerEncrypter.encryptAllContainers(t);
2147    }
2148
2149
2150    /*
2151     * Remove old versions of the containers after (re)encryption
2152     * of the database.
2153     * @param inRecovery <code> true </code>, if cleanup is
2154     * happening during recovery.
2155     */

2156    public void removeOldVersionOfContainers(boolean inRecovery)
2157        throws StandardException
2158    {
2159        // check if old containers are being removed during recovery
2160        // because of a crash after successful completion of
2161        // (re)encryption of the database, but before the
2162        // (re)encryption cleanup was complete.
2163        if (inRecovery) {
2164            containerEncrypter = new EncryptData(this);
2165        }
2166        containerEncrypter.removeOldVersionOfContainers(inRecovery);
2167        containerEncrypter = null;
2168    }
2169
2170
2171    /**
2172        Returns the encryption block size used by the algorithm at time of
2173        creation of an encrypted database
2174     */

2175    public int getEncryptionBlockSize()
2176    {
2177        return rawStoreFactory.getEncryptionBlockSize();
2178    }
2179
2180    public String JavaDoc getVersionedName(String JavaDoc name, long generationId)
2181    {
2182        return name.concat(".G".concat(Long.toString(generationId)));
2183    }
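    /*
     * Illustrative note (not part of the original Derby source):
     * getVersionedName("x.jar", 3) returns "x.jar.G3" - the generation id is
     * appended after a ".G" suffix; "x.jar" is a made-up name for the example.
     */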
2184
2185    /**
2186     * Return an id which can be used to create a container.
2187     * <p>
2188     * Return an id number which is greater than any existing container
2189     * in the current database. Caller will use this to allocate future
2190     * container numbers - most likely caching the value and then incrementing
2191     * it as it is used.
2192     * <p>
2193     *
2194     * @return An id which can be used to create a container.
2195     *
2196     * @exception StandardException Standard exception policy.
2197     **/

2198    public long getMaxContainerId()
2199        throws StandardException
2200    {
2201        return(findMaxContainerId());
2202    }
2203
2204    synchronized long getNextId() {
2205        return nextContainerId++;
2206    }
2207
2208    /** return a secure random number */
2209    int random()
2210    {
2211        return databaseEncrypted ? rawStoreFactory.random() : 0;
2212    }
2213
2214    /**
2215        Add a file to the list of files to be removed post recovery.
2216    */

2217    void fileToRemove( StorageFile file, boolean remove)
2218    {
2219        if (postRecoveryRemovedFiles == null)
2220            postRecoveryRemovedFiles = new Hashtable JavaDoc();
2221        String JavaDoc path = null;
2222        synchronized( this)
2223        {
2224            actionCode = GET_PATH_ACTION;
2225            actionFile = file;
2226            try
2227            {
2228                path = (String JavaDoc) AccessController.doPrivileged( this);
2229            }
2230            catch (PrivilegedActionException JavaDoc pae)
2231            {
2232                // GET_PATH does not throw an exception
2233            }
2234            finally
2235            {
2236                actionFile = null;
2237            }
2238        }
2239        if (remove) // to be removed
2240            postRecoveryRemovedFiles.put(path, file);
2241        else
2242            postRecoveryRemovedFiles.remove(path);
2243    
2244    }
2245
2246    /**
2247        Called after recovery is performed.
2248
2249        @exception StandardException Standard Cloudscape Error Policy
2250    */

2251    public void postRecovery() throws StandardException
2252    {
2253
2254        // hook up the cache cleaner daemon after recovery is finished
2255        DaemonService daemon = rawStoreFactory.getDaemon();
2256
2257        if (daemon == null)
2258            return;
2259
2260        containerCache.useDaemonService(daemon);
2261
2262        pageCache.useDaemonService(daemon);
2263        if (postRecoveryRemovedFiles != null)
2264        {
2265            synchronized( this)
2266            {
2267                actionCode = POST_RECOVERY_REMOVE_ACTION;
2268                try
2269                {
2270                    AccessController.doPrivileged( this);
2271                }
2272                catch (PrivilegedActionException JavaDoc pae)
2273                {
2274                    // POST_RECOVERY_REMOVE does not throw an exception
2275                }
2276            }
2277            postRecoveryRemovedFiles = null;
2278        }
2279    }
2280
2281    public void freezePersistentStore() throws StandardException
2282    {
2283        synchronized(freezeSemaphore)
2284        {
2285            if (isFrozen)
2286            {
2287                throw StandardException.newException(
2288                        SQLState.RAWSTORE_NESTED_FREEZE);
2289            }
2290
2291            // set this to true first to stop all writes from starting after
2292            // this.
2293            isFrozen = true;
2294
2295            // wait for all in-progress writes to finish
2296            try
2297            {
2298                while(writersInProgress > 0)
2299                {
2300                    try
2301                    {
2302                        freezeSemaphore.wait();
2303                    }
2304                    catch (InterruptedException JavaDoc ie)
2305                    {
2306                        // make sure we are not stuck in frozen state if we
2307                        // caught an interrupt exception and the calling
2308                        // thread may not have a chance to call unfreeze
2309                        isFrozen = false;
2310                        freezeSemaphore.notifyAll();
2311
2312                        throw StandardException.interrupt(ie);
2313                    }
2314                }
2315            }
2316            catch (RuntimeException JavaDoc rte)
2317            {
2318                // make sure we are not stuck in frozen state if we
2319                // caught a run time exception and the calling thread may not
2320                // have a chance to call unfreeze
2321                isFrozen = false;
2322                freezeSemaphore.notifyAll();
2323                throw rte; // rethrow run time exception
2324            }
2325
2326            if (SanityManager.DEBUG)
2327                SanityManager.ASSERT(writersInProgress == 0 &&
2328                                     isFrozen == true,
2329                                     "data store is not properly frozen");
2330        }
2331    }
2332
2333    public void unfreezePersistentStore()
2334    {
2335        synchronized(freezeSemaphore)
2336        {
2337            isFrozen = false;
2338            freezeSemaphore.notifyAll();
2339        }
2340    }
2341
2342    public void writeInProgress() throws StandardException
2343    {
2344        synchronized(freezeSemaphore)
2345        {
2346            // do not start write, the persistent store is frozen
2347            while(isFrozen)
2348            {
2349                try
2350                {
2351                    freezeSemaphore.wait();
2352                }
2353                catch (InterruptedException JavaDoc ie)
2354                {
2355                    throw StandardException.interrupt(ie);
2356                }
2357            }
2358
2359            // store is not frozen, proceed to write - do this last
2360
writersInProgress++;
2361        }
2362    }
2363    
2364    public void writeFinished()
2365    {
2366        synchronized(freezeSemaphore)
2367        {
2368            if (SanityManager.DEBUG)
2369                SanityManager.ASSERT(writersInProgress > 0,
2370                                     "no writers in progress");
2371
2372            writersInProgress--;
2373            freezeSemaphore.notifyAll(); // wake up the freezer
2374        }
2375    }
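    /*
     * Illustrative sketch (not part of the original Derby source): the
     * freeze/write handshake implemented by freezePersistentStore(),
     * writeInProgress() and writeFinished() above, reduced to a small
     * standalone monitor. Writers register before touching the store and
     * deregister afterwards; freeze() blocks new writers and waits for the
     * in-flight ones to drain. The class name is an assumption for the example.
     */
    private static final class SketchFreezeLatch
    {
        private boolean frozen;
        private int writers;

        synchronized void beginWrite() throws InterruptedException
        {
            while (frozen)          // no new writes while frozen
                wait();
            writers++;
        }

        synchronized void endWrite()
        {
            writers--;
            notifyAll();            // wake a waiting freezer
        }

        synchronized void freeze() throws InterruptedException
        {
            frozen = true;          // stop new writers first
            while (writers > 0)     // then drain the in-flight ones
                wait();
        }

        synchronized void unfreeze()
        {
            frozen = false;
            notifyAll();
        }
    }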
2376
2377
2378    /*
2379     * Find all the containers stored in the seg0 directory and
2380     * backup each container to the specified backup location.
2381     */

2382    public void backupDataFiles(Transaction rt, File backupDir) throws StandardException
2383    {
2384                
2385        /*
2386         * List of containers that needs to be backed up are identified by
2387         * simply reading the list of files in seg0.
2388         * All container that are created after the container list is created
2389         * when backup is in progress are recreated on restore using the
2390         * transaction log.
2391         */

2392
2393        String JavaDoc[] files = getContainerNames();
2394        
2395        if (files != null) {
2396            // No user visible locks are acquired to back up the database. A stable backup
2397            // is made by latching the pages and internal synchronization
2398            // mechanisms.
2399            LockingPolicy lockPolicy = rt.newLockingPolicy(LockingPolicy.MODE_NONE,
2400                                                            TransactionController.ISOLATION_NOLOCK,
2401                                                            false);
2402            long segmentId = 0;
2403
2404            // loop through all the files in seg0 and back up all valid containers.
2405            for (int f = files.length-1; f >= 0 ; f--) {
2406                long containerId;
2407                try {
2408                    containerId =
2409                        Long.parseLong(files[f].substring(1, (files[f].length() -4)), 16);
2410                }
2411                catch (Throwable JavaDoc t)
2412                {
2413                    // ignore errors from parse, it just means that someone put
2414                    // a file in seg0 that we didn't expect. Continue with the
2415                    // next one.
2416                    continue;
2417                }
2418
2419                ContainerKey identity = new ContainerKey(segmentId, containerId);
2420
2421                /* Not necessary to get the container thru the transaction.
2422                 * Backup opens the container in read only mode, so no need to
2423                 * transition the transaction to the active state.
2424                 *
2425                 * Dropped container stubs also have to be backed up
2426                 * for restore to work correctly. That is
2427                 * why we are using an open call that lets us
2428                 * open dropped containers.
2429                 */

2430
2431                ContainerHandle containerHdl = openDroppedContainer((RawTransaction)rt,
2432                                                                    identity, lockPolicy,
2433                                                                    ContainerHandle.MODE_READONLY);
2434                /*
2435                 * Note 1:
2436                 * If a container creation is in progress , open call will wait
2437                 * until it is complete; It will never return a handle to a
2438                 * container that is partially created. (see cache manager code
2439                 * for more details)
2440                 *
2441                 * Note 2:
2442                 * if a container creation failed in the middle after the list
2443                 * of the names are read from seg0, it will not exist in
2444                 * the database any more, so nothing to backup. Attempt
2445                 * to open such container will return null.
2446                 *
2447                 */

2448
2449                if( containerHdl != null) {
2450                    containerHdl.backupContainer(backupDir.getPath());
2451                    containerHdl.close();
2452                }
2453            }
2454        } else
2455        {
2456            if (SanityManager.DEBUG)
2457                SanityManager.THROWASSERT("backup process is unable to read container names in seg0");
2458        }
2459    }
2460
2461    /**
2462     * get all the names of the files in seg 0.
2463     * MT - This method needs to be synchronized to avoid conflicts
2464     * with other privileged actions execution in this class.
2465     * @return An array of all the file names in seg0.
2466     **/

2467    synchronized String JavaDoc[] getContainerNames()
2468    {
2469        actionCode = GET_CONTAINER_NAMES_ACTION;
2470        try{
2471            return (String JavaDoc[]) AccessController.doPrivileged( this);
2472        }
2473        catch( PrivilegedActionException JavaDoc pae){ return null;}
2474    }
2475
2476
2477
2478    /**
2479     * Removes the data directory (seg*) from the database home directory and
2480     * restores it from the backup location.
2481     * This function gets called only when any of the following attributes
2482     * are specified on the connection URL:
2483     * Attribute.CREATE_FROM (Create database from backup if it does not exist)
2484     * Attribute.RESTORE_FROM (Delete the whole database if it exists and
2485     * then restore it from backup)
2486     * Attribute.ROLL_FORWARD_RECOVERY_FROM (Perform Rollforward Recovery;
2487     * except for the log directory, everything else is replaced by the copy from
2488     * backup. Log files in the backup are copied to the existing online log
2489     * directory.)
2490     *
2491     * In all the cases, the data directory (seg*) is replaced by the data
2492     * directory from backup when this function is called.
2493     */

2494    private void restoreDataDirectory(String JavaDoc backupPath)
2495        throws StandardException
2496    {
2497        File bsegdir; //segment directory in the backup
2498        File backupRoot = new java.io.File(backupPath); //root dir of backup db
2499
2500        /* To be safe we first check if the backup directory exists and has
2501         * at least one seg* directory before removing the current data directory.
2502         *
2503         * This will fail with a security exception unless the database engine
2504         * and all its callers have permission to read the backup directory.
2505         */

2506        String JavaDoc[] bfilelist = backupRoot.list();
2507        if(bfilelist !=null)
2508        {
2509            boolean segmentexist = false;
2510            for (int i = 0; i < bfilelist.length; i++)
2511            {
2512                //check if it is a seg* directory
2513                if(bfilelist[i].startsWith("seg"))
2514                {
2515                    bsegdir = new File(backupRoot , bfilelist[i]);
2516                    if(bsegdir.exists() && bsegdir.isDirectory())
2517                    {
2518                        segmentexist = true;
2519                        break;
2520                    }
2521                }
2522            }
2523        
2524            if(!segmentexist)
2525            {
2526                throw
2527                  StandardException.newException(
2528                      SQLState.DATA_DIRECTORY_NOT_FOUND_IN_BACKUP, backupRoot);
2529            }
2530        }
2531        else
2532        {
2533            
2534            throw StandardException.newException(
2535                    SQLState.DATA_DIRECTORY_NOT_FOUND_IN_BACKUP, backupRoot);
2536        }
2537
2538        synchronized (this)
2539        {
2540            actionCode = RESTORE_DATA_DIRECTORY_ACTION;
2541            this.backupPath = backupPath;
2542            this.backupRoot = backupRoot;
2543            this.bfilelist = bfilelist;
2544            try
2545            {
2546                AccessController.doPrivileged( this);
2547            }
2548            catch (PrivilegedActionException JavaDoc pae)
2549            {
2550                throw (StandardException) pae.getException();
2551            }
2552            finally
2553            {
2554                this.backupPath = null;
2555                this.backupRoot = null;
2556                this.bfilelist = null;
2557            }
2558        }
2559    }
2560
2561    private void privRestoreDataDirectory() throws StandardException
2562    {
2563        StorageFile csegdir; //segment directory in the current db home
2564        StorageFile dataRoot =
2565            storageFactory.newStorageFile( null); //root dir of db
2566
2567        //Remove the seg* directories in the current database home directory
2568        String[] cfilelist = dataRoot.list();
2569        if(cfilelist!=null)
2570        {
2571            for (int i = 0; i < cfilelist.length; i++)
2572            {
2573                //delete only the seg* directories in the database home
2574                if(cfilelist[i].startsWith("seg"))
2575                {
2576                    csegdir = storageFactory.newStorageFile( cfilelist[i]);
2577                    if(!csegdir.deleteAll())
2578                    {
2579                        throw
2580                          StandardException.newException(
2581                              SQLState.UNABLE_TO_REMOVE_DATA_DIRECTORY,
2582                              csegdir);
2583                    }
2584                }
2585            }
2586        }
2587
2588        //copy the seg* directories from backup to current database home
2589        for (int i = 0; i < bfilelist.length; i++)
2590        {
2591            //copy only the seg* directories and copy them from backup
2592            if (bfilelist[i].startsWith("seg"))
2593            {
2594                csegdir = storageFactory.newStorageFile( bfilelist[i]);
2595                File bsegdir1 = new java.io.File JavaDoc(backupRoot, bfilelist[i]);
2596                if (!FileUtil.copyDirectory(
2597                        writableStorageFactory, bsegdir1, csegdir))
2598                {
2599                    throw
2600                      StandardException.newException(
2601                          SQLState.UNABLE_TO_COPY_DATA_DIRECTORY,
2602                          bsegdir1, csegdir);
2603                }
2604            }
2605            else if (databaseEncrypted &&
2606                     bfilelist[i].startsWith(
2607                         Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE))
2608            {
2609                // Case of encrypted database and usage of an external
2610                // encryption key, there is an extra file with name given by
2611                // Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE that needs to be
2612                // copied over during createFrom/restore operations.
2613
2614                //copy the file
2615                File fromFile = new File(backupRoot,bfilelist[i]);
2616                StorageFile toFile =
2617                    storageFactory.newStorageFile(bfilelist[i]);
2618
2619                if (!FileUtil.copyFile(writableStorageFactory,fromFile,toFile))
2620                {
2621                    throw StandardException.newException(
2622                            SQLState.UNABLE_TO_COPY_DATA_DIRECTORY,
2623                            bfilelist[i], toFile);
2624                }
2625            }
2626        }
2627
2628    } // end of privRestoreDataDirectory
2629

2630    /**
2631        Is the store read-only.
2632    */

2633    public boolean isReadOnly()
2634    {
2635        // return what the baseDataFileFactory thinks
2636        return readOnly;
2637    }
2638
2639    /**
2640     * @return The StorageFactory used by this dataFactory
2641     */

2642    public StorageFactory getStorageFactory()
2643    {
2644        return storageFactory;
2645    }
2646
2647    // PrivilegedExceptionAction method
2648    public final Object run() throws Exception
2649    {
2650        switch( actionCode)
2651        {
2652        case BOOT_ACTION:
2653            readOnly = storageFactory.isReadOnlyDatabase();
2654            supportsRandomAccess = storageFactory.supportsRandomAccess();
2655            return null;
2656            
2657        case GET_TEMP_DIRECTORY_ACTION:
2658            return storageFactory.getTempDir();
2659
2660        case REMOVE_TEMP_DIRECTORY_ACTION:
2661            StorageFile tempDir = storageFactory.getTempDir();
2662            if( tempDir != null)
2663                tempDir.deleteAll();
2664            return null;
2665
2666        case GET_CONTAINER_PATH_ACTION:
2667        case GET_ALTERNATE_CONTAINER_PATH_ACTION:
2668        {
2669            StringBuffer JavaDoc sb = new StringBuffer JavaDoc("seg");
2670            sb.append(containerId.getSegmentId());
2671            sb.append(storageFactory.getSeparator());
2672            if( actionCode == GET_CONTAINER_PATH_ACTION)
2673            {
2674                sb.append(stub ? 'd' : 'c');
2675                sb.append(Long.toHexString(containerId.getContainerId()));
2676                sb.append(".dat");
2677            }
2678            else
2679            {
2680                sb.append(stub ? 'D' : 'C');
2681                sb.append(Long.toHexString(containerId.getContainerId()));
2682                sb.append(".DAT");
2683            }
2684            return storageFactory.newStorageFile( sb.toString());
2685        } // end of cases GET_CONTAINER_PATH_ACTION & GET_ALTERNATE_CONTAINER_PATH_ACTION
2686

2687        case REMOVE_STUBS_ACTION:
2688        {
2689            char separator = storageFactory.getSeparator();
2690            StorageFile root = storageFactory.newStorageFile( null);
2691
2692            // get all the non-temporary data segments, they start with "seg"
2693            String[] segs = root.list();
2694            for (int s = segs.length-1; s >= 0; s--)
2695            {
2696                if (segs[s].startsWith("seg"))
2697                {
2698                    StorageFile seg =
2699                        storageFactory.newStorageFile(root, segs[s]);
2700
2701                    if (seg.exists() && seg.isDirectory())
2702                    {
2703                        String JavaDoc[] files = seg.list();
2704                        for (int f = files.length-1; f >= 0 ; f--)
2705                        {
2706                            // stub
2707                            if (files[f].startsWith("D") ||
2708                                files[f].startsWith("d"))
2709                            {
2710                                StorageFile stub =
2711                                    storageFactory.newStorageFile(
2712                                        root, segs[s] + separator + files[f]);
2713
2714                                boolean delete_status = stub.delete();
2715                                
2716                                if (SanityManager.DEBUG)
2717                                {
2718                                    // delete should always work, code which
2719                                    // created the StorageFactory already
2720                                    // checked for existence.
2721                                    if (!delete_status)
2722                                    {
2723                                        SanityManager.THROWASSERT(
2724                                            "delete of stub (" +
2725                                            stub + ") failed.");
2726                                    }
2727                                }
2728                            }
2729                        }
2730                    }
2731                }
2732            }
2733            break;
2734        } // end of case REMOVE_STUBS_ACTION
2735

2736        case FIND_MAX_CONTAINER_ID_ACTION:
2737        {
2738            long maxnum = 1;
2739            StorageFile seg = storageFactory.newStorageFile( "seg0");
2740
2741            if (seg.exists() && seg.isDirectory())
2742            {
2743                // create an array with names of all files in seg0
2744                String[] files = seg.list();
2745
2746                // loop through array looking for maximum containerid.
2747                for (int f = files.length-1; f >= 0 ; f--)
2748                {
2749                    try
2750                    {
2751                        long fileNumber =
2752                          Long.parseLong(
2753                              files[f].substring(
2754                                  1, (files[f].length() -4)), 16);
2755
2756                        if (fileNumber > maxnum)
2757                            maxnum = fileNumber;
2758                    }
2759                    catch (Throwable JavaDoc t)
2760                    {
2761                        // ignore errors from parse, it just means that someone
2762                        // put a file in seg0 that we didn't expect. Continue
2763                        // with the next one.
2764                    }
2765                }
2766            }
2767            return ReuseFactory.getLong( maxnum);
2768        } // end of case FIND_MAX_CONTAINER_ID_ACTION
2769

2770        case DELETE_IF_EXISTS_ACTION:
2771        {
2772            boolean ret = actionFile.exists() && actionFile.delete();
2773            actionFile = null;
2774            return ret ? this : null;
2775        } // end of case DELETE_IF_EXISTS_ACTION
2776

2777        case GET_PATH_ACTION:
2778        {
2779            String JavaDoc path = actionFile.getPath();
2780            actionFile = null;
2781            return path;
2782        } // end of case GET_PATH_ACTION
2783

2784        case POST_RECOVERY_REMOVE_ACTION:
2785        {
2786            for (Enumeration JavaDoc e = postRecoveryRemovedFiles.elements();
2787                    e.hasMoreElements(); )
2788            {
2789                StorageFile f = (StorageFile) e.nextElement();
2790                if (f.exists())
2791                {
2792                    boolean delete_status = f.delete();
2793
2794                    if (SanityManager.DEBUG)
2795                    {
2796                        // delete should always work, code which
2797                        // created the StorageFactory already
2798                        // checked for existence.
2799                        if (!delete_status)
2800                        {
2801                            SanityManager.THROWASSERT(
2802                                "delete of stub (" + stub + ") failed.");
2803                        }
2804                    }
2805                }
2806            }
2807            return null;
2808        }
2809
2810        case GET_LOCK_ON_DB_ACTION:
2811            privGetJBMSLockOnDB();
2812            return null;
2813
2814        case RELEASE_LOCK_ON_DB_ACTION:
2815            privReleaseJBMSLockOnDB();
2816            return null;
2817
2818        case RESTORE_DATA_DIRECTORY_ACTION:
2819            privRestoreDataDirectory();
2820            return null;
2821        case GET_CONTAINER_NAMES_ACTION:
2822        {
2823            StorageFile seg = storageFactory.newStorageFile( "seg0");
2824            if (seg.exists() && seg.isDirectory())
2825            {
2826                // return the names of all files in seg0
2827                return seg.list();
2828            }
2829            return null;
2830        } // end of case GET_CONTAINER_NAMES_ACTION
2831

2832        }
2833        return null;
2834    } // end of run
2835
}
2836