


1 /*
2
3    Derby - Class org.apache.derby.impl.store.raw.log.LogToFile
4
5    Licensed to the Apache Software Foundation (ASF) under one or more
6    contributor license agreements. See the NOTICE file distributed with
7    this work for additional information regarding copyright ownership.
8    The ASF licenses this file to you under the Apache License, Version 2.0
9    (the "License"); you may not use this file except in compliance with
10    the License. You may obtain a copy of the License at
11
12       http://www.apache.org/licenses/LICENSE-2.0
13
14    Unless required by applicable law or agreed to in writing, software
15    distributed under the License is distributed on an "AS IS" BASIS,
16    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17    See the License for the specific language governing permissions and
18    limitations under the License.
19
20  */

21
22 package org.apache.derby.impl.store.raw.log;
23
24 import org.apache.derby.iapi.services.diag.Performance;
25
26 import org.apache.derby.impl.store.raw.log.CheckpointOperation;
27 import org.apache.derby.impl.store.raw.log.LogCounter;
28 import org.apache.derby.impl.store.raw.log.LogRecord;
29 import org.apache.derby.impl.store.raw.log.StreamLogScan;
30
31 // need this to print nested exception that corrupts the database
32 import org.apache.derby.iapi.services.context.ErrorStringBuilder;
33
34 import org.apache.derby.iapi.services.info.ProductGenusNames;
35 import org.apache.derby.iapi.services.info.ProductVersionHolder;
36
37 import org.apache.derby.iapi.reference.MessageId;
38 import org.apache.derby.iapi.reference.Property;
39 import org.apache.derby.iapi.reference.SQLState;
40
41 import org.apache.derby.iapi.services.daemon.DaemonService;
42 import org.apache.derby.iapi.services.daemon.Serviceable;
43 import org.apache.derby.iapi.services.context.ContextManager;
44 import org.apache.derby.iapi.services.context.ContextService;
45 import org.apache.derby.iapi.services.context.ShutdownException;
46 import org.apache.derby.iapi.services.monitor.Monitor;
47 import org.apache.derby.iapi.services.monitor.ModuleControl;
48 import org.apache.derby.iapi.services.monitor.ModuleSupportable;
49 import org.apache.derby.iapi.services.monitor.PersistentService;
50 import org.apache.derby.iapi.services.sanity.SanityManager;
51 import org.apache.derby.iapi.services.io.Formatable;
52 import org.apache.derby.iapi.services.io.TypedFormat;
53 import org.apache.derby.iapi.services.io.FormatIdUtil;
54 import org.apache.derby.iapi.services.io.StoredFormatIds;
55 import org.apache.derby.iapi.services.stream.HeaderPrintWriter;
56 import org.apache.derby.iapi.services.stream.PrintWriterGetHeader;
57 import org.apache.derby.iapi.services.stream.InfoStreams;
58 import org.apache.derby.iapi.error.StandardException;
59 import org.apache.derby.iapi.services.i18n.MessageService;
60 import org.apache.derby.iapi.store.access.AccessFactory;
61 import org.apache.derby.iapi.store.access.AccessFactoryGlobals;
62 import org.apache.derby.iapi.store.access.TransactionController;
63 import org.apache.derby.iapi.store.raw.Loggable;
64 import org.apache.derby.iapi.store.raw.RawStoreFactory;
65 import org.apache.derby.iapi.store.raw.ScanHandle;
66 import org.apache.derby.iapi.store.raw.log.LogFactory;
67 import org.apache.derby.iapi.store.raw.log.Logger;
68 import org.apache.derby.iapi.store.raw.log.LogInstant;
69 import org.apache.derby.iapi.store.raw.log.LogScan;
70 import org.apache.derby.iapi.store.raw.Transaction;
71 import org.apache.derby.iapi.store.raw.xact.RawTransaction;
72 import org.apache.derby.iapi.store.raw.xact.TransactionFactory;
73 import org.apache.derby.iapi.store.raw.data.DataFactory;
74 import org.apache.derby.iapi.services.property.PersistentSet;
75
76 import org.apache.derby.iapi.store.access.DatabaseInstant;
77 import org.apache.derby.catalog.UUID;
78 import org.apache.derby.iapi.services.uuid.UUIDFactory;
79 import org.apache.derby.iapi.services.property.PropertyUtil;
80 import org.apache.derby.iapi.reference.Attribute;
81 import org.apache.derby.iapi.services.io.FileUtil;
82 import org.apache.derby.iapi.util.ReuseFactory;
83
84 import org.apache.derby.io.StorageFactory;
85 import org.apache.derby.io.WritableStorageFactory;
86 import org.apache.derby.io.StorageFile;
87 import org.apache.derby.io.StorageRandomAccessFile;
88
89 import java.io.File; // Plain files are used for backups
90 import java.io.IOException;
91 import java.io.SyncFailedException;
92 import java.io.ByteArrayOutputStream;
93 import java.io.DataOutputStream;
94 import java.io.ByteArrayInputStream;
95 import java.io.DataInputStream;
96 import java.io.FileNotFoundException;
97
98 import java.net.MalformedURLException;
99 import java.net.URL;
100
101 import java.util.Properties;
102 import java.util.Vector;
103 import java.util.zip.CRC32;
104
105 /**
106
107     This is an implementation of the log using a non-circular file system file.
108     No support for incremental log backup or media recovery.
109     Only crash recovery is supported.
110     <P>
111     The 'log' is a stream of log records. The 'log' is implemented as
112     a series of numbered log files. These numbered log files are logically
113     continuous so a transaction can have log records that span multiple log files.
114     A single log record cannot span more than one log file. The log file number
115     is monotonically increasing.
116     <P>
117     The log belongs to a log factory of a RawStore. In the current implementation,
118     each RawStore only has one log factory, so each RawStore only has one log
119     (which is composed of multiple log files).
120     At any given time, a log factory only writes new log records to one log file,
121     this log file is called the 'current log file'.
122     <P>
123     A log file is named log<em>logNumber</em>.dat
124     <P>
125     Every time a checkpoint is taken, a new log file is created and all subsequent
126     log records will go to the new log file. After a checkpoint is taken, old
127     and useless log files will be deleted.
128     <P>
129     RawStore exposes a checkpoint method which clients can call, or a checkpoint is
130     taken automatically by the RawStore when
131     <OL>
132     <LI> the log file grows beyond a certain size (configurable; see DEFAULT_LOG_SWITCH_INTERVAL and DEFAULT_CHECKPOINT_INTERVAL below)
133     <LI> RawStore is shutdown and a checkpoint hasn't been done "for a while"
134     <LI> RawStore is recovered and a checkpoint hasn't been done "for a while"
135     </OL>
136     <P>
137     This LogFactory is responsible for the formats of 2 kinds of file: the log
138     file and the log control file. And it is responsible for the format of the
139     log record wrapper.
140     <P> <PRE>
141
142     Format of log control file
143
144     @format_id FILE_STREAM_LOG_FILE
145     @purpose The log control file contains information about which log files
146     are present and where the last checkpoint log record is located.
147     @upgrade
148     @disk_layout
149     (pre-v15)
150         int format id
151         int log file version
152         long the log instant (LogCounter) of the last completed checkpoint
153     (v15 onward)
154         int format id
155         int obsolete log file version
156         long the log instant (LogCounter) of the last completed checkpoint
157         int JBMS version
158         int checkpoint interval
159         long spare (value set to 0)
160         long spare (value set to 0)
161         long spare (value set to 0)
162
163     @end_format
164     </PRE>
165     <HR WIDTH="100%">
166     <PRE>
167
168     Format of the log file
169
170     @format_id FILE_STREAM_LOG_FILE
171     @purpose The log file contains log record which record all the changes
172     to the database. The complete transaction log is composed of a series of
173     log files.
174     @upgrade
175     @disk_layout
176         int format id - the format Id of this log file
177         int obsolete log file version - not used
178         long log file number - this number orders the log files in a
179                         series to form the complete transaction log
180         long prevLogRecord - log instant of the previous log record, in the
181                 previous log file.
182
183         [log record wrapper]* one or more log records with wrapper
184
185         int endMarker - value of zero. The beginning of a log record wrapper
186                 is the length of the log record, therefore it is never zero
187         [int fuzzy end]* zero or more int's of value 0, in case this log file
188                 has been recovered and any incomplete log record set to zero.
189     @end_format
190     </PRE>
191     <HR WIDTH="100%">
192     <PRE>
193
194     Format of the log record wrapper
195
196     @format_id none. The format is implied by the FILE_STREAM_LOG_FILE
197     @purpose The log record wrapper provides information for the log scan.
198     @upgrade
199     @disk_layout
200         length(int) length of the log record (for forward scan)
201         instant(long) LogInstant of the log record
202         logRecord(byte[length]) byte array that is written by the FileLogger
203         length(int) length of the log record (for backward scan)
204     @end_format
205     </PRE>
206
207
208     <P>Multithreading considerations:<BR>
209     Log Factory must be MT-safe.
210     <P>
211     Class is final as it has methods with privilege blocks
212     and implements PrivilegedExceptionAction.
213     */

214
215 public final class LogToFile implements LogFactory, ModuleControl, ModuleSupportable,
216                                   Serviceable, java.security.PrivilegedExceptionAction
217 {
218
219     private static int fid = StoredFormatIds.FILE_STREAM_LOG_FILE;
220
221     // format Id must fit in 4 bytes
222

223     /**
224         Return my format identifier.
225     */

226     public int getTypeFormatId() {
227         return StoredFormatIds.FILE_STREAM_LOG_FILE;
228     }
229
230     // at the beginning of every log file is the following information:
231     // the log file formatId
232     // the log file version (int)
233     // the log file number (long)
234     // the log instant at the end of the last log record in the previous file (long)
235     public static final int LOG_FILE_HEADER_SIZE = 24;
236
237     protected static final int LOG_FILE_HEADER_PREVIOUS_LOG_INSTANT_OFFSET = LOG_FILE_HEADER_SIZE-8;
238
239     // Number of bytes overhead of each log record.
240     // 4 bytes of length at the beginning, 8 bytes of log instant,
241     // and 4 bytes of length at the end
242     public static final int LOG_RECORD_OVERHEAD = 4+8+4;
243
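    /* Editor's note: a minimal, hypothetical sketch (not part of this class) of how
       the 24-byte file header and one log record wrapper described in the class
       javadoc could be decoded with plain java.io streams. The field order follows
       the @disk_layout comments above; the class and variable names are illustrative.

        import java.io.DataInputStream;
        import java.io.FileInputStream;
        import java.io.IOException;

        class LogHeaderSketch {
            public static void main(String[] args) throws IOException {
                DataInputStream in = new DataInputStream(new FileInputStream(args[0]));
                int formatId        = in.readInt();   // format id of this log file
                int obsoleteVersion = in.readInt();   // obsolete log file version - not used
                long fileNumber     = in.readLong();  // orders this file in the series
                long prevInstant    = in.readLong();  // instant of last record in previous file

                // first log record wrapper: length, instant, payload, trailing length
                int length   = in.readInt();          // a value of zero would be the end marker
                long instant = in.readLong();
                byte[] payload = new byte[length];
                in.readFully(payload);
                int trailingLength = in.readInt();    // same as length, used for backward scans
                in.close();

                System.out.println("file " + fileNumber + ", first record " + length + " bytes");
            }
        }
    */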
244     public static final String DBG_FLAG = SanityManager.DEBUG ? "LogTrace" : null;
245     public static final String DUMP_LOG_ONLY = SanityManager.DEBUG ? "DumpLogOnly" : null;
246     public static final String DUMP_LOG_FROM_LOG_FILE =
247         SanityManager.DEBUG ? "derby.storage.logDumpStart" : null;
248     protected static final String LOG_SYNC_STATISTICS = "LogSyncStatistics";
249
250     // If you change this number, then JBMS 1.1x and 1.2x will give a really
251     // horrendous error message when booting against a db created by you. When
252     // we decided that we don't need to worry about people mis-using the
253     // product that way, then we can change this. Just remember, before we do,
254     // all existing databases will have the number 9 in there.
255     private static final int OBSOLETE_LOG_VERSION_NUMBER = 9;
256
257     /* how big the log file should be before checkpoint or log switch is taken */
258     private static final int DEFAULT_LOG_SWITCH_INTERVAL = 1024*1024;
259     private static final int LOG_SWITCH_INTERVAL_MIN = 100000;
260     private static final int LOG_SWITCH_INTERVAL_MAX = 128*1024*1024;
261     private static final int CHECKPOINT_INTERVAL_MIN = 100000;
262     private static final int CHECKPOINT_INTERVAL_MAX = 128*1024*1024;
263     private static final int DEFAULT_CHECKPOINT_INTERVAL = 10*1024*1024;
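    /* Editor's note: the switch and checkpoint intervals above are tunable. The usual
       knobs are the derby.storage.logSwitchInterval and derby.storage.checkpointInterval
       system properties (property names assumed here, not taken from this file); the
       MIN/MAX constants above suggest the accepted range. A hypothetical embedded boot:

        import java.sql.Connection;
        import java.sql.DriverManager;

        class CheckpointTuningSketch {
            public static void main(String[] args) throws Exception {
                // larger intervals: fewer checkpoints, but longer recovery after a crash
                System.setProperty("derby.storage.checkpointInterval", "10485760"); // 10MB
                System.setProperty("derby.storage.logSwitchInterval", "1048576");   // 1MB
                Class.forName("org.apache.derby.jdbc.EmbeddedDriver");
                Connection conn =
                    DriverManager.getConnection("jdbc:derby:demoDB;create=true");
                conn.close();
            }
        }
    */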
264
265     //log buffer size values
266     private static final int DEFAULT_LOG_BUFFER_SIZE = 32768; //32K
267     private static final int LOG_BUFFER_SIZE_MIN = 8192; //8K
268     private static final int LOG_BUFFER_SIZE_MAX = LOG_SWITCH_INTERVAL_MAX;
269     private int logBufferSize = DEFAULT_LOG_BUFFER_SIZE;
270
271     /* Log Control file flags. */
272     private static final byte IS_BETA_FLAG = 0x1;
273     
274     /**
275      * When the derby.system.durability property is set to 'test', the store
276      * system will not force sync calls in the following cases
277      * - for the log file at each commit
278      * - for the log file before data page is forced to disk
279      * - for page allocation when file is grown
280      * - for data writes during checkpoint
281      * This means it is possible that the recovery system may not work properly,
282      * committed transactions may be lost, and/or database may not
283      * be in a consistent state.
284      * In order to recognize that the database was booted in this mode
285      * at any time in the past, this value is written out
286      * into the log control file as part of the log control file flags byte.
287      * This helps prevent us from wasting time resolving issues
288      * in such cases.
289      * @see org.apache.derby.iapi.reference.Property#DURABILITY_PROPERTY
290      */

291     private static final byte IS_DURABILITY_TESTMODE_NO_SYNC_FLAG = 0x2;
292     
293     /**
294      * keeps track of whether the database was booted previously at any time with
295      * derby.system.durability=test
296      */

297     private static boolean wasDBInDurabilityTestModeNoSync = false;
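    /* Editor's note: derby.system.durability=test (referenced above) turns off the
       log and data syncs listed in the previous comment, trading durability for speed;
       once a database has ever been booted this way, the flag above is remembered in
       the log control file. A hypothetical way to enable it for throwaway test runs:

        // Must be set before the database is booted; never use it on data you care about.
        System.setProperty("derby.system.durability", "test");
    */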
298     
299     /* to err on the conservative side, unless otherwise set, assume log
300      * archive is ON
301      */

302     private static final String DEFAULT_LOG_ARCHIVE_DIRECTORY = "DEFAULT";
303
304     private int logSwitchInterval = DEFAULT_LOG_SWITCH_INTERVAL;
305     private int checkpointInterval = DEFAULT_CHECKPOINT_INTERVAL;
306
307     String dataDirectory; // where files are stored
308     private WritableStorageFactory logStorageFactory;
309     
310     private boolean logBeingFlushed; // is the log in the middle of a flush
311                                     // (access of the variable should sync on this)
312
313     protected LogAccessFile logOut; // an output stream to the log file
314                                     // (access of the variable should sync on this)
315     private StorageRandomAccessFile firstLog = null;
316     protected long endPosition = -1; // end position of the current log file
317     long lastFlush = 0;     // the position in the current log
318                             // file that has been flushed to disk
319

320     long logFileNumber = -1; // current log file number
321                              // other than during boot and recovery time,
322                              // logFileNumber is only changed by
323                              // switchLogFile, which is synchronized.
324                              //
325                              // MT - Anyone accessing this number should
326                              // synchronize on this if the current log file
327                              // must not be changed. If not synchronized,
328                              // the log file may have been switched.
329

330     long firstLogFileNumber = -1;
331                                 // first log file that makes up the active
332                                 // portion (with active transactions) of the
333                                 // log.
334                                 //
335                                 // MT - This value is set during recovery or
336                                 // during log truncation. In the former, a single
337                                 // thread is assumed. In the latter, callers
338                                 // must synchronize on this to access
339                                 // or change it.
340

341     private long maxLogFileNumber = LogCounter.MAX_LOGFILE_NUMBER;
342     private CheckpointOperation currentCheckpoint;
343                                 // last checkpoint successfully taken
344                                 //
345                                 // MT - only changed or accessed in recovery or
346                                 // checkpoint; both are single thread access
347
348     long checkpointInstant;
349                                 // log instant of the currentCheckpoint
350

351     private DaemonService checkpointDaemon; // the background worker thread who is going to
352                                              // do checkpoints for this log factory.
353

354     private int myClientNumber;
355                                 // use this number to talk to checkpoint Daemon
356

357     private volatile boolean checkpointDaemonCalled;
358                                 // checkpoint Daemon called already - it is not
359                                 // important that this value is correct, the
360                                 // daemon just needs to be called once in a
361                                 // while. Daemon can handle multiple posts.
362
363     private long logWrittenFromLastCheckPoint = 0;
364                                 // keeps track of the amount of log written between checkpoints
365     private RawStoreFactory rawStoreFactory;
366                                 // use this only after recovery is finished
367

368     protected DataFactory dataFactory;
369                                 // use this only after recovery is finished
370
371     protected boolean ReadOnlyDB; // true if this db is read only, i.e., cannot
372                                   // append log records
373
374
375     // DEBUG DEBUG - do not truncate log files
376     private boolean keepAllLogs;
377
378     // if database is encrypted, the content of the log files is encrypted
379     private boolean databaseEncrypted;
380
381     // the following booleans are used to put the log factory into various
382     // states
383     private boolean recoveryNeeded = true; // log needs to be recovered
384     private boolean inCheckpoint = false; // in the middle of a checkpoint
385     private boolean inRedo = false; // in the middle of redo loop
386     private boolean inLogSwitch = false;
387
388     // make sure we don't do anything after the log factory has been stopped
389     private boolean stopped = false;
390
391     // if log is to go to another device, this variable is set. If null, then
392     // log goes to the log subdirectory underneath the data directory
393     String logDevice;
394
395     // disable syncing of log file when running in derby.system.durability=test
396     private boolean logNotSynced = false;
397
398     private volatile boolean logArchived = false;
399     private boolean logSwitchRequired = false;
400
401     /** DEBUG test only */
402     int test_logWritten = 0;
403     int test_numRecordToFillLog = -1;
404     private int mon_flushCalls;
405     private int mon_syncCalls;
406     private int mon_numLogFlushWaits;
407     private boolean mon_LogSyncStatistics;
408     private int mon_numBytesToLog;
409
410
411     /**
412         If not null then something is corrupt in the raw store and this represents the original error.
413     */

414     protected volatile StandardException corrupt;
415
416     /**
417         If frozen, don't allow anything on disk to change.
418      */

419     private boolean isFrozen;
420
421     /**
422       Product Version information. Invariant after boot.
423       */

424     ProductVersionHolder jbmsVersion;
425
426     /**
427         On disk database version information. When running in soft upgrade this version
428         may be different from jbmsVersion.
429     */

430     private int onDiskMajorVersion;
431     private int onDiskMinorVersion;
432     private boolean onDiskBeta;
433     
434     private CRC32 checksum = new CRC32(); // holder for the checksum
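    /* Editor's note: java.util.zip.CRC32 (the checksum holder above) is used along
       these general lines; a small self-contained sketch, not code from this class:

        byte[] data = "log record bytes".getBytes();
        java.util.zip.CRC32 crc = new java.util.zip.CRC32();
        crc.reset();
        crc.update(data, 0, data.length);
        long checksumValue = crc.getValue();   // 32-bit value returned as a long
    */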
435

436     
437     /**
438      * Note: Why does the logging system support both file sync and write sync?
439      * The reason to support both file and write sync of logs is that there
440      * was no support for write sync until jdk1.4, and then there was a write
441      * sync JVM bug in jdk1.4.1; only in jdk1.4.2 can the write sync (rws mode)
442      * mechanism be used correctly.
443      * The default in JVMs >= jdk1.4.2 is write sync (see the boot method for JVM checks).
444      *
445      * Write sync support was added for performance reasons.
446      * On commit, the logging system has to make sure the log for the committed
447      * transaction is on disk. Without write sync, the log is written to the
448      * disk and then fsync() is used on commit to make sure the log has reached
449      * the disk. On most operating systems, fsync() calls are expensive.
450      * On heavily commit-oriented systems, file sync makes the system run slowly.
451      * This problem is solved by using write sync on a preallocated log file;
452      * write sync is much faster than doing a write followed by a file sync.
453      * The file should be preallocated for write sync to perform better than
454      * the file sync method. Whenever a new log file is created,
455      * logSwitchInterval bytes are preallocated by writing zeros to the file after the header.
456      */

457
458     /* If set to true, write sync will be used to do log writes; otherwise,
459      * file level sync is used.
460      */

461     private boolean isWriteSynced = false;
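    /* Editor's note: "write sync" above refers to opening the log in "rws" mode so
       updates are synced to the device as part of each write, instead of writing and
       then calling an explicit file sync. A minimal sketch of the two styles with
       plain java.io, illustrative only:

        import java.io.RandomAccessFile;

        class SyncStylesSketch {
            public static void main(String[] args) throws Exception {
                // style 1: write sync - every write() is synced by the JVM/OS
                RandomAccessFile rws = new RandomAccessFile("a.log", "rws");
                rws.write(new byte[100]);
                rws.close();

                // style 2: plain write followed by an explicit file sync
                RandomAccessFile rw = new RandomAccessFile("b.log", "rw");
                rw.write(new byte[100]);
                rw.getFD().sync();      // comparable to fsync()
                rw.close();
            }
        }
    */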
462
463     
464     // log file that is yet to be copied to backup; updates to this variable
465     // need to be visible to the checkpoint thread.
466     private volatile long logFileToBackup;
467     // It is set to true when online backup is in progress; updates to
468     // this variable need to be visible to the checkpoint thread.
469     private volatile boolean backupInProgress = false;
470    
471
472     /**
473         MT- not needed for constructor
474     */

475     public LogToFile() {
476         keepAllLogs = PropertyUtil.getSystemBoolean(RawStoreFactory.KEEP_TRANSACTION_LOG);
477         
478
479         if (Performance.MEASURE)
480             mon_LogSyncStatistics = PropertyUtil.getSystemBoolean(LOG_SYNC_STATISTICS);
481     }
482
483     /*
484     ** Methods of Corruptable
485     */

486
487     /**
488         Once the log factory is marked as corrupt, the raw store is shut down as far as possible without causing further damage.
489     */

490     public StandardException markCorrupt(StandardException originalError) {
491
492         boolean firsttime = false;
493
494         synchronized (this)
495         {
496             if (corrupt == null && originalError != null)
497             {
498                 corrupt = originalError;
499                 firsttime = true;
500             }
501         }
502
503         // only print the first error
504         if (corrupt == originalError)
505             logErrMsg(corrupt);
506
507
508         // this is the first time someone detects error, shutdown the
509         // system as much as possible without further damaging it
510         if (firsttime)
511         {
512             synchronized(this)
513             {
514                 stopped = true;
515
516                 if (logOut != null)
517                 {
518                     try
519                     {
520                         logOut.corrupt(); // get rid of open file descriptor
521                     }
522                     catch (IOException ioe)
523                     {
524                         // don't worry about it, just trying to clean up
525                     }
526                 }
527
528                 // NullPointerException is preferred over corrupting the database
529                 logOut = null;
530             }
531
532             if (dataFactory != null)
533                 dataFactory.markCorrupt(null);
534
535         }
536
537         return originalError;
538     }
539
540     private void checkCorrupt() throws StandardException
541     {
542         synchronized (this)
543         {
544             if (corrupt != null)
545             {
546                 throw StandardException.newException(
547                         SQLState.LOG_STORE_CORRUPT, corrupt);
548             }
549         }
550     }
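    /* Editor's note: markCorrupt/checkCorrupt above implement a "record the first
       failure, then fail fast" guard. A stripped-down sketch of the same pattern,
       illustrative only and not Derby code:

        class CorruptGuardSketch {
            private volatile Exception corrupt;

            Exception markCorrupt(Exception e) {
                synchronized (this) {
                    if (corrupt == null && e != null)
                        corrupt = e;          // remember only the first error
                }
                return e;
            }

            void checkCorrupt() throws Exception {
                synchronized (this) {
                    if (corrupt != null)
                        throw new Exception("store is corrupt", corrupt);
                }
            }
        }
    */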
551
552     /*
553     ** Methods of LogFactory
554     */

555
556     /**
557         MT- not needed
558     */

559     public Logger getLogger() {
560
561         if (ReadOnlyDB)
562             return null;
563         else
564             return new FileLogger(this);
565     }
566
567     /**
568         Recover the rawStore to a consistent state using the log.
569
570         <P>
571         In this implementation, the log is a stream of log records stored in
572         one or more flat files. Recovery is done in 2 passes: redo and undo.
573         <BR> <B>Redo pass</B>
574         <BR> In the redo pass, reconstruct the state of the rawstore by
575         repeating exactly what happened before as recorded in the log.
576         <BR><B>Undo pass</B>
577         <BR> In the undo pass, all incomplete transactions are rolled back in
578         the order from the most recently started to the oldest.
579
580         <P>MT - synchronization provided by caller - RawStore boot.
581         This method is guaranteed to be the only method being called and can
582         assume single thread access on all fields.
583
584         @see Loggable#needsRedo
585         @see FileLogger#redo
586
587         @exception StandardException Standard Cloudscape error policy
588     */

589     public void recover(
590     RawStoreFactory rsf,
591     DataFactory df,
592     TransactionFactory tf)
593          throws StandardException
594     {
595         if (SanityManager.DEBUG)
596         {
597             SanityManager.ASSERT(rsf != null, "raw store factory == null");
598             SanityManager.ASSERT(df != null, "data factory == null");
599         }
600
601         checkCorrupt();
602
603         rawStoreFactory = rsf;
604         dataFactory = df;
605         
606         // initialize the log writer only after the rawstorefactory is available,
607         // log writer requires encryption block size info from rawstore factory
608         // to encrypt checksum log records.
609         if (firstLog != null)
610             logOut = new LogAccessFile(this, firstLog, logBufferSize);
611
612         // we don't want to set ReadOnlyDB before recovery has a chance to look
613         // at the latest checkpoint and determine that the database is shutdown
614         // cleanly. If the medium is read only but there are logs that need
615         // to be redone or in flight transactions, we are hosed. The logs that
616         // are redone will leave dirty pages in the cache.
617

618         if (recoveryNeeded)
619         {
620             try
621             {
622                 /////////////////////////////////////////////////////////////
623                 //
624                 // During boot time, the log control file is accessed and
625                 // logFileNumber is determined. LogOut is not set up.
626                 // LogFileNumber is the log file the latest checkpoint lives in,
627                 // or 1. It may not be the latest log file (the system may have
628                 // crashed between the time a new log was generated and the
629                 // checkpoint log written), that can only be determined at the
630                 // end of recovery redo.
631                 //
632                 /////////////////////////////////////////////////////////////
633

634                 FileLogger logger = (FileLogger)getLogger();
635
636                 /////////////////////////////////////////////////////////////
637                 //
638                 // try to find the most recent checkpoint
639                 //
640                 /////////////////////////////////////////////////////////////
641                 if (checkpointInstant != LogCounter.INVALID_LOG_INSTANT)
642                 {
643                     currentCheckpoint =
644                         findCheckpoint(checkpointInstant, logger);
645                 }
646
647                 // if we are only interested in dumping the log, start from the
648                 // beginning of the first log file
649                 if (SanityManager.DEBUG)
650                 {
651                     if (SanityManager.DEBUG_ON(DUMP_LOG_ONLY))
652                     {
653                         currentCheckpoint = null;
654
655                         System.out.println("Dump log only");
656
657                         // unless otherwise specified, 1st log file starts at 1
658                         String beginLogFileNumber =
659                             PropertyUtil.getSystemProperty(
660                                 DUMP_LOG_FROM_LOG_FILE);
661
662                         if (beginLogFileNumber != null)
663                         {
664                             logFileNumber =
665                                 Long.valueOf(beginLogFileNumber).longValue();
666                         }
667                         else
668                         {
669                             logFileNumber = 1;
670                         }
671                     }
672                 }
673
674                 if (SanityManager.DEBUG)
675                 {
676                     if (SanityManager.DEBUG_ON("setCheckpoint"))
677                     {
678                         currentCheckpoint = null;
679
680                         System.out.println("Set Checkpoint.");
681
682                         // unless otherwise specified, 1st log file starts at 1
683                         String checkpointStartLogStr =
684                             PropertyUtil.getSystemProperty(
685                                 "derby.storage.checkpointStartLog");
686
687                         String checkpointStartOffsetStr =
688                             PropertyUtil.getSystemProperty(
689                                 "derby.storage.checkpointStartOffset");
690
691
692                         if ((checkpointStartLogStr != null) &&
693                             (checkpointStartOffsetStr != null))
694                         {
695                             checkpointInstant =
696                                 LogCounter.makeLogInstantAsLong(
697                                     Long.valueOf(checkpointStartLogStr).longValue(),
698                                     Long.valueOf(checkpointStartOffsetStr).longValue());
699                         }
700                         else
701                         {
702                             SanityManager.THROWASSERT(
703                                 "must set derby.storage.checkpointStartLog and derby.storage.checkpointStartOffset, if setting setCheckpoint.");
704                         }
705
706                         currentCheckpoint =
707                             findCheckpoint(checkpointInstant, logger);
708                     }
709                 }
710
711                 long redoLWM = LogCounter.INVALID_LOG_INSTANT;
712                 long undoLWM = LogCounter.INVALID_LOG_INSTANT;
713                 long ttabInstant = LogCounter.INVALID_LOG_INSTANT;
714
715                 StreamLogScan redoScan = null;
716                 if (currentCheckpoint != null)
717                 {
718                     Formatable transactionTable = null;
719
720                     // RESOLVE: sku
721                     // currentCheckpoint.getTransactionTable();
722
723                     // need to set the transaction table before the undo
724                     tf.useTransactionTable(transactionTable);
725
726                     redoLWM = currentCheckpoint.redoLWM();
727                     undoLWM = currentCheckpoint.undoLWM();
728
729                     if (transactionTable != null)
730                         ttabInstant = checkpointInstant;
731
732                     if (SanityManager.DEBUG)
733                     {
734                         if (SanityManager.DEBUG_ON(DBG_FLAG))
735                         {
736                             SanityManager.DEBUG(DBG_FLAG,
737                                 "Found checkpoint at " +
738                                 LogCounter.toDebugString(checkpointInstant) +
739                                 " " + currentCheckpoint.toString());
740                         }
741                     }
742
743                     firstLogFileNumber = LogCounter.getLogFileNumber(redoLWM);
744
745                     // figure out where the first interesting log file is.
746                     if (LogCounter.getLogFileNumber(undoLWM) <
747                             firstLogFileNumber)
748                     {
749                         firstLogFileNumber =
750                             LogCounter.getLogFileNumber(undoLWM);
751                     }
752
753
754                     // if the checkpoint record doesn't have a transaction
755                     // table, we need to rebuild it by scanning the log from
756                     // the undoLWM. If it does have a transaction table, we
757                     // only need to scan the log from the redoLWM
758

759                     redoScan = (StreamLogScan)
760                         openForwardsScan(undoLWM, (LogInstant)null);
761
762                 }
763                 else
764                 {
765                     // no checkpoint
766                     tf.useTransactionTable((Formatable)null);
767
768                     long start =
769                         LogCounter.makeLogInstantAsLong(
770                             logFileNumber, LOG_FILE_HEADER_SIZE);
771
772                     // no checkpoint, start redo from the beginning of the
773                     // file - assume this is the first log file
774                     firstLogFileNumber = logFileNumber;
775
776                     redoScan = (StreamLogScan)
777                         openForwardsScan(start, (LogInstant)null);
778                 }
779
780                 // open a transaction that is used for redo and rollback
781                 RawTransaction recoveryTransaction =
782                     tf.startTransaction(
783                         rsf,
784                         ContextService.getFactory().getCurrentContextManager(),
785                         AccessFactoryGlobals.USER_TRANS_NAME);
786
787                 // make this transaction aware that it is a recovery transaction
788                 // and don't spew forth post commit work while replaying the log
789                 recoveryTransaction.recoveryTransaction();
790
791                 /////////////////////////////////////////////////////////////
792                 //
793                 // Redo loop - in FileLogger
794                 //
795                 /////////////////////////////////////////////////////////////
796
797                 //
798                 // set log factory state to inRedo so that if redo caused any
799                 // dirty page to be written from the cache, it won't flush the
800                 // log since the end of the log has not been determined and we
801                 // know the log record that caused the page to change has
802                 // already been written to the log. We need the page write to
803                 // go thru the log factory because if the redo has a problem,
804                 // the log factory is corrupt and the only way we know not to
805                 // write out the page in a checkpoint is if it checks with the
806                 // log factory, and that is done via a flush - we use the WAL
807                 // protocol to stop corrupt pages from writing to the disk.
808                 //
809                 inRedo = true;
810
811                 long logEnd =
812                     logger.redo(
813                         recoveryTransaction, tf, redoScan, redoLWM,
814                         ttabInstant);
815
816                 inRedo = false;
817                 
818
819                 
820                 // if we are only interested in dumping the log, don't alter
821                 // the database and prevent anyone from using the log
822                 if (SanityManager.DEBUG)
823                 {
824                     if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY))
825                     {
826                         Monitor.logMessage("_____________________________________________________");
827                         Monitor.logMessage("\n\t\t Log dump finished");
828                         Monitor.logMessage("_____________________________________________________");
829                         // just in case, it has not been set anyway
830                         logOut = null;
831
832                         return;
833                     }
834                 }
835
836
837                 /////////////////////////////////////////////////////////////
838                 //
839                 // determine where the log ends
840                 //
841                 /////////////////////////////////////////////////////////////
842                 StorageRandomAccessFile theLog = null;
843
844
845                 // if logend == LogCounter.INVALID_LOG_SCAN, that means there
846                 // is no log record in the log - most likely it is corrupted in
847                 // some way ...
848                 if (logEnd == LogCounter.INVALID_LOG_INSTANT)
849                 {
850                     Monitor.logTextMessage(MessageId.LOG_LOG_NOT_FOUND);
851
852                     StorageFile logFile = getLogFileName(logFileNumber);
853
854                     if (privExists(logFile))
855                     {
856                         // if we can delete this strange corrupted file, do so,
857                         // otherwise, skip it
858                         if (!privDelete(logFile))
859                         {
860                             logFile = getLogFileName(++logFileNumber);
861                         }
862                     }
863
864                     try
865                     {
866                         theLog = privRandomAccessFile(logFile, "rw");
867                     }
868                     catch (IOException ioe)
869                     {
870                         theLog = null;
871                     }
872
873                     if (theLog == null || !privCanWrite(logFile))
874                     {
875                         if (theLog != null)
876                             theLog.close();
877
878                         theLog = null;
879
880                         ReadOnlyDB = true;
881                     }
882                     else
883                     {
884                         try
885                         {
886                             // no previous log file or previous log position
887                             if (!initLogFile(
888                                     theLog, logFileNumber,
889                                     LogCounter.INVALID_LOG_INSTANT))
890                             {
891                                 throw markCorrupt(
892                                     StandardException.newException(
893                                         SQLState.LOG_SEGMENT_NOT_EXIST,
894                                         logFile.getPath()));
895                             }
896                         }
897                         catch (IOException ioe)
898                         {
899                             throw markCorrupt(
900                                 StandardException.newException(
901                                     SQLState.LOG_IO_ERROR, ioe));
902                         }
903
904                         // successfully init'd the log file - set up markers,
905                         // and position at the end of the log.
906                         endPosition = theLog.getFilePointer();
907                         lastFlush = endPosition;
908                         
909                         // if write sync is true, preallocate the log file
910                         // and reopen the file in rws mode.
911                         if(isWriteSynced)
912                         {
913                             // extend the file by writing zeros to it
914                             preAllocateNewLogFile(theLog);
915                             theLog.close();
916                             theLog = openLogFileInWriteMode(logFile);
917                             // position the log at the current end position
918                             theLog.seek(endPosition);
919                         }
920                         
921                         if (SanityManager.DEBUG)
922                         {
923                             SanityManager.ASSERT(
924                                 endPosition == LOG_FILE_HEADER_SIZE,
925                                 "empty log file has wrong size");
926                         }
927                         
928                         // because we are already incrementing the log number
929                         // here, no special log switch is required for
930                         // backup recoveries.
931                         logSwitchRequired = false;
932                     }
933                 }
934                 else
935                 {
936                     // logEnd is the instant of the next log record in the log
937                     // it is used to determine the last known good position of
938                     // the log
939                     logFileNumber = LogCounter.getLogFileNumber(logEnd);
940
941                     ReadOnlyDB = df.isReadOnly();
942
943                     StorageFile logFile = getLogFileName(logFileNumber);
944
945                     if (!ReadOnlyDB)
946                     {
947                         // if datafactory doesn't think it is readonly, we can
948                         // do some further tests of our own
949                         try
950                         {
951                             if(isWriteSynced)
952                                 theLog = openLogFileInWriteMode(logFile);
953                             else
954                                 theLog = privRandomAccessFile(logFile, "rw");
955                         }
956                         catch (IOException ioe)
957                         {
958                             theLog = null;
959                         }
960                         if (theLog == null || !privCanWrite(logFile))
961                         {
962                             if (theLog != null)
963                                 theLog.close();
964                             theLog = null;
965
966                             ReadOnlyDB = true;
967                         }
968                     }
969
970                     if (!ReadOnlyDB)
971                     {
972                         endPosition = LogCounter.getLogFilePosition(logEnd);
973
974                         //
975                        // The end of the log is at endPosition, which is where
976                        // the next log record should be appended.
977                        //
978                        // if the last log record ends before the end of the
979                        // log file, then this log file has a fuzzy end.
980                        // Zap all the bytes between endPosition and EOF to 0.
981                        //
982                        // the end log marker is 4 bytes (of zeros)
983                        //
984                        // if endPosition + 4 == logOut.length, we have a
985                        // properly terminated log file
986                        //
987                        // if endPosition + 4 is > logOut.length, there are 0,
988                        // 1, 2, or 3 bytes of 'fuzz' at the end of the log. We
989                        // can ignore that because it is guaranteed to be
990                        // overwritten by the next log record.
991                        //
992                        // if endPosition + 4 is < logOut.length, we have a
993                        // partial log record at the end of the log.
994                        //
995                        // We need to overwrite all of the incomplete log
996                        // record, because if we start logging but cannot
997                        // 'consume' all the bad log, then the log will truly
998                        // be corrupted if the next 4 bytes (the length of the
999                        // log record) after that is small enough that the next
1000                        // time the database is recovered, it will be
1001                        // interpreted that the whole log record is in the log
1002                        // and it will try to objectify, only to get a ClassNotFound
1003                        // error or worse.
1004                        //
1005

1006                        //find out if log had incomplete log records at the end.
1007                        if (redoScan.isLogEndFuzzy())
1008                        {
1009                            theLog.seek(endPosition);
1010                            long eof = theLog.length();
1011
1012                            Monitor.logTextMessage(MessageId.LOG_INCOMPLETE_LOG_RECORD,
1013                                logFile, new Long(endPosition), new Long(eof));
1014
1015                            /* Write zeros from incomplete log record to end of file */
1016                            long nWrites = (eof - endPosition)/logBufferSize;
1017                            int rBytes = (int)((eof - endPosition) % logBufferSize);
1018                            byte zeroBuf[]= new byte[logBufferSize];
1019                            
1020                            //write the zeros to file
1021                            while(nWrites-- > 0)
1022                                theLog.write(zeroBuf);
1023                            if(rBytes !=0)
1024                                theLog.write(zeroBuf, 0, rBytes);
1025                            
1026                            if(!isWriteSynced)
1027                                syncFile(theLog);
1028                        }
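                       // Editor's note: worked example of the zero-fill arithmetic above,
                       // with the default 32768-byte buffer: if eof - endPosition = 70000,
                       // then nWrites = 70000 / 32768 = 2 full buffers (65536 bytes) and
                       // rBytes = 70000 % 32768 = 4464 trailing bytes, so the loop writes
                       // two full zero buffers and one final partial write.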
1029
1030                        if (SanityManager.DEBUG)
1031                        {
1032                            if (theLog.length() != endPosition)
1033                            {
1034                                SanityManager.ASSERT(
1035                                    theLog.length() > endPosition,
1036                                    "log end > log file length, bad scan");
1037                            }
1038                        }
1039
1040                        // set the log to the true end position,
1041                        // and not the end of the file
1042
1043                        lastFlush = endPosition;
1044                        theLog.seek(endPosition);
1045                    }
1046                }
1047
1048                if (theLog != null)
1049                    logOut = new LogAccessFile(this, theLog, logBufferSize);
1050                
1051                if(logSwitchRequired)
1052                    switchLogFile();
1053
1054
1055                boolean noInFlightTransactions = tf.noActiveUpdateTransaction();
1056
1057                if (ReadOnlyDB)
1058                {
1059                    // in the unlikely event that someone detects we are
1060                    // dealing with a read only db, check to make sure the
1061                    // database was quiescent when it was copied, with no unflushed
1062                    // dirty buffers
1063                    if (!noInFlightTransactions)
1064                    {
1065                        throw StandardException.newException(
1066                                SQLState.LOG_READ_ONLY_DB_NEEDS_UNDO);
1067                    }
1068                }
1069
1070                /////////////////////////////////////////////////////////////
1071                //
1072                // Undo loop - in transaction factory. It just gets one
1073                // transaction at a time from the transaction table and calls
1074                // undo, no different from runtime.
1075                //
1076                /////////////////////////////////////////////////////////////
1077

1078                if (SanityManager.DEBUG)
1079                {
1080                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1081                        SanityManager.DEBUG(LogToFile.DBG_FLAG,
1082                            "About to call undo(), transaction table =" +
1083                            tf.getTransactionTable());
1084                }
1085
1086                if (!noInFlightTransactions)
1087                {
1088                    if (SanityManager.DEBUG)
1089                    {
1090                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1091                            SanityManager.DEBUG(LogToFile.DBG_FLAG,
1092                                "In recovery undo, rollback inflight transactions");
1093                    }
1094
1095                    tf.rollbackAllTransactions(recoveryTransaction, rsf);
1096
1097                    if (SanityManager.DEBUG)
1098                    {
1099                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1100                            SanityManager.DEBUG(
1101                                LogToFile.DBG_FLAG, "finish recovery undo,");
1102                    }
1103                }
1104                else
1105                {
1106                    if (SanityManager.DEBUG)
1107                    {
1108                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1109                            SanityManager.DEBUG(LogToFile.DBG_FLAG,
1110                                "No in flight transaction, no recovery undo work");
1111                    }
1112                }
1113
1114                /////////////////////////////////////////////////////////////
1115                //
1116                // XA prepared xact loop - in transaction factory. At this
1117                // point only prepared transactions should be left in the
1118                // transaction table, all others should have been aborted or
1119                // committed and removed from the transaction table. It just
1120                // gets one transaction at a time from the transaction table,
1121                // creates a real context and transaction, reclaims locks,
1122                // and leaves the new xact in the transaction table.
1123                //
1124                /////////////////////////////////////////////////////////////
1125

1126                if (SanityManager.DEBUG)
1127                {
1128                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1129                        SanityManager.DEBUG(LogToFile.DBG_FLAG,
1130                            "About to call rePrepare(), transaction table =" +
1131                            tf.getTransactionTable());
1132                }
1133
1134                tf.handlePreparedXacts(rsf);
1135
1136                if (SanityManager.DEBUG)
1137                {
1138                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1139                        SanityManager.DEBUG(LogToFile.DBG_FLAG,
1140                            "Finished rePrepare(), transaction table =" +
1141                            tf.getTransactionTable());
1142                }
1143
1144                /////////////////////////////////////////////////////////////
1145                //
1146                // End of recovery.
1147                //
1148                /////////////////////////////////////////////////////////////
1149
1150                // recovery is finished. Close the transaction
1151                recoveryTransaction.close();
1152
1153
1154                // notify the dataFactory that recovery is completed,
1155                // but before the checkpoint is written.
1156                dataFactory.postRecovery();
1157
1158
1159                //////////////////////////////////////////////////////////////
1160                // set the transaction factory short id, we have seen all the
1161                // transactions in the log, and at the minimum, the checkpoint
1162                // transaction will be there. Set the shortId to the next
1163                // value.
1164                //////////////////////////////////////////////////////////////
1165                tf.resetTranId();
1166
1167                // do a checkpoint (will flush the log) if there is any rollback
1168                // if we can't checkpoint for some reason, flush the log and carry on
1169                if (!ReadOnlyDB)
1170                {
1171                    boolean needCheckpoint = true;
1172
1173                    // if we can figure out that there is very little in the
1174                    // log (less than 1000 bytes) and we haven't done any
1175                    // rollbacks, then don't checkpoint. Otherwise checkpoint.
1176                    if (currentCheckpoint != null && noInFlightTransactions &&
1177                        redoLWM != LogCounter.INVALID_LOG_INSTANT &&
1178                        undoLWM != LogCounter.INVALID_LOG_INSTANT)
1179                    {
1180                        if ((logFileNumber == LogCounter.getLogFileNumber(redoLWM))
1181                            && (logFileNumber == LogCounter.getLogFileNumber(undoLWM))
1182                            && (endPosition < (LogCounter.getLogFilePosition(redoLWM) + 1000)))
1183                            needCheckpoint = false;
1184                    }
1185
1186                        if (needCheckpoint && !checkpoint(rsf, df, tf, false))
1187                            flush(logFileNumber, endPosition);
1188                }
1189
1190                logger.close();
1191
1192                recoveryNeeded = false;
1193            }
1194            catch (IOException JavaDoc ioe)
1195            {
1196                if (SanityManager.DEBUG)
1197                    ioe.printStackTrace();
1198
1199                throw markCorrupt(
1200                    StandardException.newException(SQLState.LOG_IO_ERROR, ioe));
1201            }
1202            catch (ClassNotFoundException JavaDoc cnfe)
1203            {
1204                throw markCorrupt(
1205                    StandardException.newException(
1206                        SQLState.LOG_CORRUPTED, cnfe));
1207            }
1208            catch (StandardException se)
1209            {
1210                throw markCorrupt(se);
1211            }
1212            catch (Throwable JavaDoc th)
1213            {
1214                if (SanityManager.DEBUG)
1215                {
1216                    SanityManager.showTrace(th);
1217                    th.printStackTrace();
1218                }
1219
1220                throw markCorrupt(
1221                    StandardException.newException(
1222                        SQLState.LOG_RECOVERY_FAILED, th));
1223            }
1224        }
1225        else
1226        {
1227
1228            tf.useTransactionTable((Formatable)null);
1229
1230            // set the transaction factory short id
1231            tf.resetTranId();
1232        }
1233
1234        // done with recovery
1235

1236        /////////////////////////////////////////////////////////////
1237        // set up the checkpoint daemon
1238        /////////////////////////////////////////////////////////////
1239        checkpointDaemon = rawStoreFactory.getDaemon();
1240        if (checkpointDaemon != null)
1241        {
1242            myClientNumber =
1243                checkpointDaemon.subscribe(this, true /*onDemandOnly */);
1244        }
1245    }
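    /* Editor's note: a compressed outline of recover() above, using the same calls
       that appear in the method body, for orientation only (error handling, debug
       paths, and read-only handling omitted):

        currentCheckpoint = findCheckpoint(checkpointInstant, logger);      // 1. locate checkpoint
        redoScan = openForwardsScan(undoLWM or start of first file, null);  // 2. choose redo start
        logEnd   = logger.redo(recoveryTransaction, tf, redoScan, redoLWM, ttabInstant);
        ... zero out any fuzzy/incomplete record after logEnd ...           // 3. repair log end
        tf.rollbackAllTransactions(recoveryTransaction, rsf);               // 4. undo losers
        tf.handlePreparedXacts(rsf);                                        //    keep XA prepared xacts
        if (!ReadOnlyDB) checkpoint(rsf, df, tf, false);                    // 5. checkpoint or flush
    */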
1246
1247 
1248    /**
1249        Checkpoint the rawStore.
1250
1251        <P> MT- Only one checkpoint is to take place at any given time.
1252
1253        <P> The steps of a checkpoint are
1254
1255        <OL>
1256        <LI> switch to a new log file if possible
1257        <PRE>
1258            freeze the log (for the transition to a new log file)
1259                flush current log file
1260                create and flush the new log file (with file number 1 higher
1261                than the previous log file). The new log file becomes the
1262                current log file.
1263            unfreeze the log
1264        </PRE>
1265        <LI> start checkpoint transaction
1266        <LI> gather interesting information about the rawStore:
1267                    the current log instant (redoLWM)
1268                    the earliest active transaction begin tran log record
1269                    instant (undoLWM), all the truncation LWM set by clients
1270                    of raw store (replication)
1271        <LI> clean the buffer cache
1272        <LI> log the next checkpoint log record, which contains
1273                (repPoint, undoLWM, redoLWM) and commit checkpoint transaction.
1274        <LI> synchronously write the control file containing the next checkpoint
1275                log record log instant
1276        <LI> the new checkpoint becomes the current checkpoint.
1277                Somewhere near the beginning of each log file should be a
1278                checkpoint log record (not guaranteed to be there)
1279        <LI> see if the log can be truncated
1280
1281        <P>
1282        The earliest useful log record is determined by the repPoint and the
1283        undoLWM, whichever is earlier.
1284        <P>
1285        Every log file whose log file number is smaller than the earliest
1286        useful log record's log file number can be deleted.
1287
1288        <P><PRE>
1289            Transactions can be at the following states w/r to a checkpoint -
1290            consider the log as a continuous stream and not as a series of log
1291            files for the sake of clarity.
1292            |(BT)-------(ET)| marks the begin and end of a transaction.
1293            .                          checkpoint started
1294            .                        |__undoLWM          |
1295            .                        V                   |___redoLWM
1296            .                        |                   |___TruncationLWM
1297            .                        |                   |
1298            .                        V                   V
1299            1 |-----------------|
1300            2       |--------------------------------|
1301            3           |-------|
1302            4               |--------------------------------------(end of log)
1303            5                    |-^-|
1304            .                    Checkpoint Log Record
1305            ---A--->|<-------B--------->|<-------------C-----------
1306        </PRE>
1307
1308        <P>
1309        There are only 3 periods of interest : <BR>
1310            A) before undoLWM, B) between undo and redo LWM, C) after redoLWM.
1311
1312        <P>
1313        Transaction 1 started in A and terminates in B.<BR>
1314            During redo, we should only see log records and endXact from this
1315            transaction in the first phase (between undoLWM and redoLWM). No
1316            beginXact log record for this transaction will be seen.
1317
1318        <P>
1319        Transaction 2 started in B (right on the undoLWM) and terminated in C.<BR>
1320            Any transaction that terminates in C must have a beginXact at or
1321            after undoLWM. In other words, no transaction can span A, B and C.
1322            During redo, we will see beginXact, other log records and endXact
1323            for this transaction.
1324
1325        <P>
1326        Transaction 3 started in B and ended in B.<BR>
1327            During redo, we will see beginXact, other log records and endXact
1328            for this transaction.
1329
1330        <P>
1331        Transaction 4 begins in B and never ends.<BR>
1332            During redo, we will see beginXact, other log records.
1333            In undo, this loser transaction will be rolled back.
1334
1335        <P>
1336        Transaction 5 is the transaction taking the checkpoint.<BR>
1337            The checkpoint action started way back in time but the checkpoint
1338            log record is only written after the buffer cache has been flushed.
1339
1340        <P>
1341        Note that if any time elapses between taking the undoLWM and the
1342            redoLWM, it will create a 4th period of interest.
1343
1344        @exception StandardException - encountered an exception while doing a
1345                                       checkpoint.
1346    */

1347    public boolean checkpoint(RawStoreFactory rsf,
1348                              DataFactory df,
1349                              TransactionFactory tf,
1350                              boolean wait)
1351         throws StandardException
1352    {
1353
1354        // call checkpoint with no pre-started transaction
1355        boolean done = checkpointWithTran(null, rsf, df, tf);
1356
1357        // The above checkpoint call will return 'false' without
1358        // performing the checkpoint if some other thread is doing a checkpoint.
1359        // In cases like backup it is necessary to wait for the
1360        // checkpoint to complete before copying the files; the 'wait' flag gets
1361        // passed in as 'true' in such cases.
1362        // When the wait flag is true, we will wait here until the other thread
1363        // which is actually doing the checkpoint completes.
1364

1365        if(!done && wait)
1366        {
1367            synchronized(this)
1368            {
1369                //wait until the thread that is doing the checkpoint completes it.
1370                while(inCheckpoint)
1371                {
1372                    try
1373                    {
1374                        wait();
1375                    }
1376                    catch (InterruptedException ie)
1377                    {
1378                        throw StandardException.interrupt(ie);
1379                    }
1380                }
1381                done = true;
1382            }
1383        }
1384
1385        return done;
1386    }
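    /*
        The following is an illustrative, standalone sketch (not part of this
        class) of the truncation rule described in the checkpoint comment above:
        the earliest useful log record is the earlier of the repPoint and the
        undoLWM, and every log file numbered below the file that holds it can be
        deleted. The helper name and the sample file numbers are hypothetical.

        <PRE>
        public class TruncationRuleSketch
        {
            // first log file number that must be kept
            static long firstLogFileNeeded(long undoLWMFileNumber, long repPointFileNumber)
            {
                // the earliest useful log record is the earlier of the two marks
                return Math.min(undoLWMFileNumber, repPointFileNumber);
            }

            public static void main(String[] args)
            {
                // e.g. the undoLWM lives in log7.dat and the repPoint in log5.dat:
                // log1.dat .. log4.dat may be deleted, log5.dat onward must stay.
                System.out.println(firstLogFileNeeded(7, 5)); // prints 5
            }
        }
        </PRE>
    */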
1387
1388
1389    /**
1390        checkpoint with a pre-started transaction
1391
1392        @exception StandardException Cloudscape Standard Error Policy
1393    */

1394    protected boolean checkpointWithTran(RawTransaction cptran,
1395                               RawStoreFactory rsf,
1396                               DataFactory df,
1397                               TransactionFactory tf)
1398         throws StandardException
1399    {
1400        boolean proceed = true;
1401        LogInstant redoLWM;
1402
1403        // we may be called to stop the database after a bad error, make sure
1404        // logOut is set
1405        if (logOut == null)
1406        {
1407            return false;
1408        }
1409
1410        long approxLogLength;
1411
1412        synchronized (this)
1413        {
1414            // has someone else found a problem in the raw store?
1415            if (corrupt != null)
1416            {
1417                throw StandardException.newException(SQLState.LOG_STORE_CORRUPT, corrupt);
1418            }
1419
1420            // if another checkpoint is in progress, don't do anything
1421            if (inCheckpoint)
1422                proceed = false;
1423            else
1424                inCheckpoint = true;
1425
1426            approxLogLength = endPosition; // current end position
1427
1428            // don't return from inside of a sync block
1429        }
1430
1431        if (!proceed)
1432        {
1433            return false;
1434        }
1435
1436        // needCPTran == true if we are not supplied with a pre-started transaction
1437        boolean needCPTran = (cptran == null);
1438
1439        if (SanityManager.DEBUG)
1440        {
1441            if (logSwitchInterval == 0)
1442            {
1443                SanityManager.THROWASSERT(
1444                    "switching log file: Approx log length = " +
1445                    approxLogLength + " logSwitchInterval = 0");
1446            }
1447        }
1448
1449
1450        try
1451        {
1452            if (approxLogLength > logSwitchInterval)
1453            {
1454                switchLogFile();
1455                // Log switch is occurring in conjunction with the
1456                // checkpoint; set the amount of log written since the last checkpoint to zero.
1457                logWrittenFromLastCheckPoint = 0;
1458            } else
1459            {
1460                // Checkpoint is happening without a log switch,
1461                // in the middle of a log file. The amount of log already written for
1462                // the current log file should not be included in the calculation
1463                // of when the next checkpoint is due, so assign the negative
1464                // of the amount of log written for this file. Later it will
1465                // be subtracted when we switch the log file or when calculating whether
1466                // we are due for a checkpoint at flush time.
1467                logWrittenFromLastCheckPoint = -endPosition;
1468            }
1469
1470            if (SanityManager.DEBUG)
1471            {
1472                // if this debug flag is set on, just switch log file
1473

1474                if (SanityManager.DEBUG_ON(TEST_LOG_SWITCH_LOG))
1475                    return false;
1476            }
1477
1478
1479            // start a checkpoint transaction
1480            if (needCPTran)
1481                cptran = tf.startInternalTransaction(rsf,
1482                ContextService.getFactory().getCurrentContextManager());
1483
1484            /////////////////////////////////////////////////////
1485            // gather a snapshot of the various interesting points of the log
1486            /////////////////////////////////////////////////////
1487            long undoLWM_long;
1488            long redoLWM_long;
1489
1490            synchronized(this) // we could synchronize on something else, it
1491                               // doesn't matter as long as logAndDo syncs on
1492                               // the same thing
1493            {
1494                // The redo LWM is the current log instant. We are going to
1495                // clean the cache shortly; any log record before this point
1496                // will never need to be redone.
1497                redoLWM_long = currentInstant();
1498                redoLWM = new LogCounter(redoLWM_long);
1499
1500                // The undo LWM is what we need to roll back all transactions.
1501                // Synchronize this with the starting of a new transaction so
1502                // that the transaction factory can have a consistent view.
1503                // See FileLogger.logAndDo
1504
1505                LogCounter undoLWM = (LogCounter)(tf.firstUpdateInstant());
1506                if (undoLWM == null)
1507                    undoLWM_long = redoLWM_long; // no active transaction
1508                else
1509                    undoLWM_long = undoLWM.getValueAsLong();
1510
1511            }
1512
1513            /////////////////////////////////////////////////////
1514            // clean the buffer cache
1515            /////////////////////////////////////////////////////
1516            df.checkpoint();
1517
1518
1519            /////////////////////////////////////////////////////
1520            // write out the checkpoint log record
1521            /////////////////////////////////////////////////////
1522
1523            // send the checkpoint record to the log
1524            Formatable transactionTable = tf.getTransactionTable();
1525
1526            CheckpointOperation nextCheckpoint =
1527                new CheckpointOperation(
1528                    redoLWM_long, undoLWM_long, transactionTable);
1529
1530            cptran.logAndDo(nextCheckpoint);
1531
1532            LogCounter checkpointInstant =
1533                (LogCounter)(cptran.getLastLogInstant());
1534
1535            if (checkpointInstant != null)
1536            {
1537                // since checkpoint is an internal transaction, I need to
1538                // flush it to make sure it actually goes to the log
1539                flush(checkpointInstant);
1540            }
1541            else
1542            {
1543                throw StandardException.newException(
1544                        SQLState.LOG_CANNOT_LOG_CHECKPOINT);
1545            }
1546
1547            cptran.commit();
1548
1549            if (needCPTran)
1550            {
1551                cptran.close(); // if we started it, we will close it
1552                cptran = null;
1553            }
1554
1555            /////////////////////////////////////////////////////
1556            // write out the log control file which contains the last
1557            // successful checkpoint log record
1558            /////////////////////////////////////////////////////
1559
1560            if (!writeControlFile(getControlFileName(),
1561                                  checkpointInstant.getValueAsLong()))
1562            {
1563                throw StandardException.newException(
1564                        SQLState.LOG_CONTROL_FILE, getControlFileName());
1565            }
1566
1567            // next checkpoint becomes the current checkpoint
1568            currentCheckpoint = nextCheckpoint;
1569
1570
1571            ////////////////////////////////////////////////////
1572            // see if we can reclaim some log space
1573            ////////////////////////////////////////////////////
1574
1575            if (!logArchived())
1576            {
1577                truncateLog(currentCheckpoint);
1578            }
1579
1580            // delete the committed container drop stubs
1581            // that are no longer required during recovery.
1582            // If a backup is in progress, don't delete the stubs until
1583            // it is done. Backup needs to copy all the stubs that
1584            // are needed to recover from the backup checkpoint on restore.
1585            if(!backupInProgress)
1586                df.removeDroppedContainerFileStubs(redoLWM);
1587        
1588        }
1589        catch (IOException ioe)
1590        {
1591            throw markCorrupt(
1592                    StandardException.newException(SQLState.LOG_IO_ERROR, ioe));
1593        }
1594        finally
1595        {
1596            synchronized(this)
1597            {
1598                
1599
1600                inCheckpoint = false;
1601                notifyAll();
1602            }
1603
1604            if (cptran != null && needCPTran)
1605            {
1606                try
1607                {
1608                    cptran.commit();
1609                    cptran.close();
1610                }
1611                catch (StandardException se)
1612                {
1613                    throw markCorrupt(StandardException.newException(
1614                                            SQLState.LOG_CORRUPTED, se));
1615                }
1616            }
1617        }
1618
1619        return true;
1620    }
1621
1622    /**
1623        Flush all unwritten log records up to the indicated log instant to disk,
1624        and sync.
1625        Also check to see if database is frozen or corrupt.
1626
1627        <P>MT - not needed, wrapper method
1628
1629        @param where flush log up to here
1630
1631        @exception StandardException Standard Cloudscape error policy
1632    */

1633    public void flush(LogInstant where) throws StandardException
1634    {
1635        long fileNumber;
1636        long wherePosition;
1637
1638        if (where == null) {
1639            // don't flush, just use this to check if database is frozen or
1640            // corrupt
1641            fileNumber = 0;
1642            wherePosition = LogCounter.INVALID_LOG_INSTANT;
1643        } else {
1644            LogCounter whereC = (LogCounter) where;
1645            fileNumber = whereC.getLogFileNumber();
1646            wherePosition = whereC.getLogFilePosition();
1647        }
1648        flush(fileNumber, wherePosition);
1649    }
1650
1651    /**
1652        Flush all unwritten log records to disk and sync.
1653        Also check to see if database is frozen or corrupt.
1654
1655        <P>MT - not needed, wrapper method
1656
1657        @exception StandardException Standard Cloudscape error policy
1658    */

1659    public void flushAll() throws StandardException
1660    {
1661        long fnum;
1662        long whereTo;
1663
1664        synchronized(this)
1665        {
1666            fnum = logFileNumber;
1667            whereTo = endPosition;
1668        }
1669
1670        flush(fnum, whereTo);
1671    }
1672
1673    /*
1674     * Private methods that help to implement methods of LogFactory
1675     */

1676
1677    /**
1678        Verify that the log file is of the right format and of the right
1679        version and log file number.
1680
1681        <P>MT - not needed, no global variables used
1682
1683        @param logFileName the name of the log file
1684        @param number the log file number
1685        @return true if the log file is of the current version and of the
1686        correct format
1687
1688        @exception StandardException Standard Cloudscape error policy
1689    */

1690    private boolean verifyLogFormat(StorageFile logFileName, long number)
1691         throws StandardException
1692    {
1693        boolean ret = false;
1694        try
1695        {
1696            StorageRandomAccessFile log = privRandomAccessFile(logFileName, "r");
1697            ret = verifyLogFormat(log, number);
1698            log.close();
1699        }
1700        catch (IOException ioe)
1701        {
1702            // cannot open or read the log file; treat it as the wrong format
1703        }
1704
1705        return ret;
1706    }
1707
1708    /**
1709        Verify that the log file is of the right format and of the right
1710        version and log file number. The log file position is set to the
1711        beginning.
1712
1713        <P>MT - MT-unsafe, caller must synchronize
1714
1715        @param log the log file
1716        @param number the log file number
1717        @return true if the log file is of the current version and of the
1718        correct format
1719
1720        @exception StandardException Standard Cloudscape error policy
1721    */

1722    private boolean verifyLogFormat(StorageRandomAccessFile log, long number)
1723         throws StandardException
1724    {
1725        try
1726        {
1727            log.seek(0);
1728            int logfid = log.readInt();
1729            int obsoleteLogVersion = log.readInt(); // this value is useless, for
1730                                                    // backwards compatibility
1731            long logNumber = log.readLong();
1732
1733            if (logfid != fid || logNumber != number)
1734            {
1735                throw StandardException.newException(
1736                        SQLState.LOG_INCOMPATIBLE_FORMAT, dataDirectory);
1737            }
1738        }
1739        catch (IOException ioe)
1740        {
1741            throw StandardException.newException(
1742                    SQLState.LOG_CANNOT_VERIFY_LOG_FORMAT, ioe, dataDirectory);
1743        }
1744
1745        return true;
1746    }
1747
1748    /**
1749        Initialize the log to the correct format with the given version and
1750        log file number. The new log file must be empty. After initializing,
1751        the file is synchronously written to disk.
1752
1753        <P>MT - synchronization provided by caller
1754
1755        @param newlog the new log file to be initialized
1756        @param number the log file number
1757        @param prevLogRecordEndInstant the end position of the previous log record
1758
1759        @return true if the log file is empty, else false.
1760
1761        @exception IOException if new log file cannot be accessed or initialized
1762    */

1763
1764    private boolean initLogFile(StorageRandomAccessFile newlog, long number,
1765                                long prevLogRecordEndInstant)
1766         throws IOException, StandardException
1767    {
1768        if (newlog.length() != 0)
1769            return false;
1770
1771        if (SanityManager.DEBUG)
1772        {
1773            if ( SanityManager.DEBUG_ON(TEST_LOG_FULL))
1774                testLogFull();
1775        }
1776        if (SanityManager.DEBUG)
1777        {
1778            if (SanityManager.DEBUG_ON(TEST_SWITCH_LOG_FAIL1))
1779                throw new IOException("TestLogSwitchFail1");
1780        }
1781
1782
1783        newlog.seek(0);
1784
1785        newlog.writeInt(fid);
1786        newlog.writeInt(OBSOLETE_LOG_VERSION_NUMBER); // for silly backwards compatibility reason
1787        newlog.writeLong(number);
1788        newlog.writeLong(prevLogRecordEndInstant);
1789
1790        syncFile(newlog);
1791
1792        return true;
1793    }
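    /*
        A standalone sketch (not part of this class) of what initLogFile() above
        lays down at the start of every log file: a format id, the obsolete
        version int kept for backwards compatibility, the log file number, and
        the end instant of the previous log record. The path "log/log1.dat" is
        only an assumed example of the "log<n>.dat" naming used by getLogFileName.

        <PRE>
        import java.io.DataInputStream;
        import java.io.FileInputStream;
        import java.io.IOException;

        public class LogHeaderDumpSketch
        {
            public static void main(String[] args) throws IOException
            {
                try (DataInputStream in =
                         new DataInputStream(new FileInputStream("log/log1.dat")))
                {
                    int formatId        = in.readInt();  // checked by verifyLogFormat
                    int obsoleteVersion = in.readInt();  // backwards compatibility only
                    long fileNumber     = in.readLong(); // must match the file's number
                    long prevRecordEnd  = in.readLong(); // end instant of previous record
                    System.out.println(formatId + " " + obsoleteVersion + " "
                                       + fileNumber + " " + prevRecordEnd);
                }
            }
        }
        </PRE>
    */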
1794
1795    /**
1796        Switch to the next log file if possible.
1797
1798        <P>MT - log factory is single threaded thru a log file switch, the log
1799        is frozen for the duration of the switch
1800    */

1801    private void switchLogFile() throws StandardException
1802    {
1803        boolean switchedOver = false;
1804
1805        /////////////////////////////////////////////////////
1806        // Freeze the log for the switch over to a new log file.
1807        // This blocks out any other threads from sending log
1808        // records to the log stream.
1809        //
1810        // The switching of the log file and checkpoint are really
1811        // independent events; they are tied together just because a
1812        // checkpoint is the natural place to switch the log and vice
1813        // versa. This could happen before the cache is flushed or
1814        // after the checkpoint log record is written.
1815        /////////////////////////////////////////////////////
1816        synchronized (this)
1817        {
1818
1819            // Make sure that this thread of control is guaranteed to complete
1820            // its work of switching the log file without having to give up
1821            // the semaphore to a backup or another flusher. Do this by looping
1822            // until we have the semaphore, the log is not being flushed, and
1823            // the log is not frozen for backup. Track (2985).
1824            while(logBeingFlushed | isFrozen)
1825            {
1826                try
1827                {
1828                    wait();
1829                }
1830                catch (InterruptedException ie)
1831                {
1832                    throw StandardException.interrupt(ie);
1833                }
1834            }
1835
1836            // we have an empty log file here, refuse to switch.
1837            if (endPosition == LOG_FILE_HEADER_SIZE)
1838            {
1839                if (SanityManager.DEBUG)
1840                {
1841                    Monitor.logMessage("not switching from an empty log file (" +
1842                           logFileNumber + ")");
1843                }
1844                return;
1845            }
1846
1847            // log file isn't being flushed right now and logOut is not being
1848            // used.
1849            StorageFile newLogFile = getLogFileName(logFileNumber+1);
1850
1851            if (logFileNumber+1 >= maxLogFileNumber)
1852            {
1853                throw StandardException.newException(
1854                        SQLState.LOG_EXCEED_MAX_LOG_FILE_NUMBER,
1855                        new Long(maxLogFileNumber));
1856            }
1857
1858            StorageRandomAccessFile newLog = null; // the new log file
1859            try
1860            {
1861                // if the log file exists and cannot be deleted, we cannot
1862                // switch the log right now
1863                if (privExists(newLogFile) && !privDelete(newLogFile))
1864                {
1865                    logErrMsg(MessageService.getTextMessage(
1866                        MessageId.LOG_NEW_LOGFILE_EXIST,
1867                        newLogFile.getPath()));
1868                    return;
1869                }
1870
1871                try
1872                {
1873                    newLog = privRandomAccessFile(newLogFile, "rw");
1874                }
1875                catch (IOException ioe)
1876                {
1877                    newLog = null;
1878                }
1879
1880                if (newLog == null || !privCanWrite(newLogFile))
1881                {
1882                    if (newLog != null)
1883                        newLog.close();
1884                    newLog = null;
1885
1886                    return;
1887                }
1888
1889                if (initLogFile(newLog, logFileNumber+1,
1890                                LogCounter.makeLogInstantAsLong(logFileNumber, endPosition)))
1891                {
1892
1893                    // New log file init ok; close the old one and
1894                    // switch over. After this point, we need to shut down the
1895                    // database if any error crops up.
1896                    switchedOver = true;
1897
1898                    // write out an extra 0 at the end to mark the end of the log
1899                    // file.
1900
1901                    logOut.writeEndMarker(0);
1902
1903                    endPosition += 4;
1904                    // note that we are in a log switch to prevent the flusher
1905                    // from requesting another log switch
1906                    inLogSwitch = true;
1907                    // flush everything including the int we just wrote
1908                    flush(logFileNumber, endPosition);
1909                    
1910                    
1911                    // simulate out of log error after the switch over
1912                    if (SanityManager.DEBUG)
1913                    {
1914                        if (SanityManager.DEBUG_ON(TEST_SWITCH_LOG_FAIL2))
1915                            throw new IOException("TestLogSwitchFail2");
1916                    }
1917
1918
1919                    logOut.close(); // close the old log file
1920

1921                    logWrittenFromLastCheckPoint += endPosition;
1922
1923                    endPosition = newLog.getFilePointer();
1924                    lastFlush = endPosition;
1925                    
1926                    if(isWriteSynced)
1927                    {
1928                        // extend the file by writing zeros to it
1929                        preAllocateNewLogFile(newLog);
1930                        newLog.close();
1931                        newLog = openLogFileInWriteMode(newLogFile);
1932                        newLog.seek(endPosition);
1933                    }
1934
1935                    logOut = new LogAccessFile(this, newLog, logBufferSize);
1936                    newLog = null;
1937
1938
1939                    if (SanityManager.DEBUG)
1940                    {
1941                        if (endPosition != LOG_FILE_HEADER_SIZE)
1942                            SanityManager.THROWASSERT(
1943                                            "new log file has unexpected size " +
1944                                            endPosition);
1945                    }
1946                    logFileNumber++;
1947
1948                    if (SanityManager.DEBUG)
1949                    {
1950                        SanityManager.ASSERT(endPosition == LOG_FILE_HEADER_SIZE,
1951                                             "empty log file has wrong size");
1952                    }
1953
1954                }
1955                else // something went wrong, delete the half baked file
1956                {
1957                    newLog.close();
1958                    newLog = null;
1959
1960                    if (privExists(newLogFile))
1961                        privDelete(newLogFile);
1962
1963                    logErrMsg(MessageService.getTextMessage(
1964                        MessageId.LOG_CANNOT_CREATE_NEW,
1965                        newLogFile.getPath()));
1966                    newLogFile = null;
1967                }
1968
1969            }
1970            catch (IOException ioe)
1971            {
1972
1973                inLogSwitch = false;
1974                // switching log file is an optional operation and there is no direct user
1975                // control. Just send a warning message to the system
1976                // administrator, if there is one.
1977
1978                logErrMsg(MessageService.getTextMessage(
1979                    MessageId.LOG_CANNOT_CREATE_NEW_DUETO,
1980                    newLogFile.getPath(),
1981                    ioe.toString()));
1982
1983                try
1984                {
1985                    if (newLog != null)
1986                    {
1987                        newLog.close();
1988                        newLog = null;
1989                    }
1990                }
1991                catch (IOException ioe2) {}
1992
1993                if (newLogFile != null && privExists(newLogFile))
1994                {
1995                    privDelete(newLogFile);
1996                    newLogFile = null;
1997                }
1998
1999                if (switchedOver) // error occurred after the old log file has been closed!
2000                {
2001                    logOut = null; // limit any damage
2002                    throw markCorrupt(
2003                        StandardException.newException(
2004                                SQLState.LOG_IO_ERROR, ioe));
2005                }
2006            }
2007            
2008            inLogSwitch = false;
2009        }
2010        // unfreezes the log
2011    }
2012
2013    /**
2014        Flush all unwritten log records up to the indicated log instant to disk,
2015        without syncing.
2016
2017        <P>MT - not needed, wrapper method
2018
2019        @param fileNumber the log file that wherePosition refers to
2020        @param wherePosition flush the log up to this position
2021        @exception IOException Failed to flush to the log
2022    */

2023    private void flushBuffer(long fileNumber, long wherePosition)
2024        throws IOException, StandardException
2025    {
2026        synchronized (this) {
2027            if (fileNumber < logFileNumber) // history
2028                return;
2029
2030            // A log instant indicates the start of a log record
2031            // but not how long it is. Thus the amount of data in
2032            // the logOut buffer is irrelevant. We can only skip
2033            // the flush if a real synced flush has already
2034            // included this required log instant. This is because
2035            // we never flush & sync partial log records.
2036
2037            if (wherePosition < lastFlush) // already flushed
2038                return;
2039
2040            // We don't update lastFlush here because lastFlush
2041            // is the last position in the log file that has been
2042            // flushed *and* synced to disk. Here we only flush;
2043            // i.e. lastFlush should really be named lastSync.
2044            //
2045            // We could have another variable indicating the point to
2046            // which the log has been flushed, which this routine
2047            // could take advantage of. This would only help rollbacks though.
2048
2049            logOut.flushLogAccessFile();
2050        }
2051    }
2052    /** Get rid of old and unnecessary log files
2053
2054        <P> MT- only one log truncation is allowed to be taking place at any
2055        given time. Synchronized on this.
2056
2057     */

2058    private void truncateLog(CheckpointOperation checkpoint)
2059    {
2060        long oldFirstLog;
2061        long firstLogNeeded;
2062
2063        if (keepAllLogs)
2064            return;
2065        if ((firstLogNeeded = getFirstLogNeeded(checkpoint))==-1)
2066            return;
2067        
2068        // when backup is in progress, log files that are yet to
2069        // be copied to the backup should not be deleted, even
2070        // if they are not required for crash recovery.
2071        if(backupInProgress) {
2072            long logFileNeededForBackup = logFileToBackup;
2073            // Check whether the log file that is yet to be copied
2074            // to the backup is earlier than the log file required
2075            // for crash recovery. If it is, then the first
2076            // log file that must not be deleted is the log file
2077            // that is yet to be copied to the backup.
2078            if (logFileNeededForBackup < firstLogNeeded)
2079                firstLogNeeded = logFileNeededForBackup;
2080        }
2081
2082        oldFirstLog = firstLogFileNumber;
2083        firstLogFileNumber = firstLogNeeded;
2084
2085        while(oldFirstLog < firstLogNeeded)
2086        {
2087            StorageFile uselessLogFile = null;
2088            try
2089            {
2090                uselessLogFile = getLogFileName(oldFirstLog);
2091                if (privDelete(uselessLogFile))
2092                {
2093                    if (SanityManager.DEBUG)
2094                    {
2095                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2096                            SanityManager.DEBUG(DBG_FLAG, "truncating useless log file " + uselessLogFile.getPath());
2097                    }
2098                }
2099                else
2100                {
2101                    if (SanityManager.DEBUG)
2102                    {
2103                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2104                            SanityManager.DEBUG(DBG_FLAG, "Fail to truncate useless log file " + uselessLogFile.getPath());
2105                    }
2106                }
2107            }
2108            catch (StandardException se)
2109            {
2110                if (SanityManager.DEBUG)
2111                    SanityManager.THROWASSERT("error opening log segment while deleting "
2112                                              + uselessLogFile.getPath(), se);
2113
2114                // if insane, just leave it be
2115            }
2116
2117            oldFirstLog++;
2118        }
2119    }
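    /*
        A standalone sketch (not part of this class) of the deletion loop above:
        remove every "log<n>.dat" below the first log file still needed. Derby
        does this through its StorageFactory and privileged helpers rather than
        java.io.File; the directory handling here is only illustrative.

        <PRE>
        import java.io.File;

        public class LogTruncateSketch
        {
            static void truncate(File logDir, long oldFirstLog, long firstLogNeeded)
            {
                while (oldFirstLog < firstLogNeeded)
                {
                    File uselessLogFile = new File(logDir, "log" + oldFirstLog + ".dat");
                    if (!uselessLogFile.delete())
                        System.err.println("failed to delete " + uselessLogFile);
                    oldFirstLog++;
                }
            }
        }
        </PRE>
    */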
2120
2121   
2122
2123    /**
2124     * Return the "oldest" log file still needed by recovery.
2125     * <p>
2126     * Returns the log file that contains the undoLWM, ie. the oldest
2127     * log record of all uncommitted transactions in the given checkpoint.
2128     *
2129     * If no checkpoint is given then returns -1, indicating all log records
2130     * may be necessary.
2131     *
2132     **/

2133    private long getFirstLogNeeded(CheckpointOperation checkpoint)
2134    {
2135        long firstLogNeeded;
2136
2137        // one truncation at a time
2138        synchronized (this)
2139        {
2140            firstLogNeeded =
2141                (checkpoint != null ?
2142                     LogCounter.getLogFileNumber(checkpoint.undoLWM()) : -1);
2143
2144            if (SanityManager.DEBUG)
2145            {
2146                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2147                    SanityManager.DEBUG(DBG_FLAG,
2148                       "truncatLog: undoLWM firstlog needed " + firstLogNeeded);
2149            }
2150
2151            if (SanityManager.DEBUG)
2152            {
2153                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2154                {
2155                    SanityManager.DEBUG(DBG_FLAG,
2156                      "truncatLog: checkpoint truncationLWM firstlog needed " +
2157                      firstLogNeeded);
2158                    SanityManager.DEBUG(DBG_FLAG,
2159                      "truncatLog: firstLogFileNumber = " + firstLogFileNumber);
2160                }
2161            }
2162        }
2163        return firstLogNeeded;
2164    }
2165
2166
2167    /**
2168        Carefully write out this value to the control file.
2169        We do a safe write of this data by writing the data
2170        into two files every time we write the control data.
2171        We write a checksum at the end of the file, so if by
2172        chance the system crashes while writing into the file,
2173        the checksum lets us detect that the control file
2174        is hosed, and we then use the mirror file, which will have
2175        the control data written at the last checkpoint.
2176
2177        See the comment at the beginning of this file for the log control file format.
2178
2179        <P> MT- synchronized by caller
2180    */

2181    boolean writeControlFile(StorageFile logControlFileName, long value)
2182         throws IOException, StandardException
2183    {
2184        StorageRandomAccessFile logControlFile = null;
2185
2186        ByteArrayOutputStream baos = new ByteArrayOutputStream(64);
2187        DataOutputStream daos = new DataOutputStream(baos);
2188
2189        daos.writeInt(fid);
2190
2191        // so that when this db is booted by a 1.1x or 1.2x JBMS, an IOException
2192        // stack trace, rather than some error message that tells
2193        // the user to delete the database, will show up.
2194        daos.writeInt(OBSOLETE_LOG_VERSION_NUMBER);
2195        daos.writeLong(value);
2196
2197        if (onDiskMajorVersion == 0) {
2198            onDiskMajorVersion = jbmsVersion.getMajorVersion();
2199            onDiskMinorVersion = jbmsVersion.getMinorVersion();
2200            onDiskBeta = jbmsVersion.isBeta();
2201        }
2202
2203        // previous to 1.3, that's all we wrote.
2204        // from 1.3 onward, also write out the JBMSVersion
2205        daos.writeInt(onDiskMajorVersion);
2206        daos.writeInt(onDiskMinorVersion);
2207
2208        // For 2.0 beta we added the build number and the isBeta indication.
2209        // (5 bytes from our first spare long)
2210        daos.writeInt(jbmsVersion.getBuildNumberAsInt());
2211
2212        byte flags = 0;
2213        if (onDiskBeta)
2214            flags |= IS_BETA_FLAG;
2215        
2216        // When the database is booted with derby.system.durability=test,
2217        // this mode does not guarantee that
2218        // - the database will recover
2219        // - committed transactions will not be lost
2220        // - the database will be in a consistent state
2221        // Hence it is necessary to keep track of this state so we don't
2222        // waste time resolving issues in such cases.
2223        // wasDBInDurabilityTestModeNoSync records whether the database was
2224        // previously booted at any time in this mode.
2225        if (logNotSynced || wasDBInDurabilityTestModeNoSync)
2226            flags |= IS_DURABILITY_TESTMODE_NO_SYNC_FLAG;
2227        daos.writeByte(flags);
2228
2229        //
2230        // write some spare bytes; after 2.0 we have 3 + 2(8) spare bytes.
2231        long spare = 0;
2232       
2233        daos.writeByte(0);
2234        daos.writeByte(0);
2235        daos.writeByte(0);
2236        daos.writeLong(spare);
2237        daos.flush();
2238        // write the checksum for the control data written
2239        checksum.reset();
2240        checksum.update(baos.toByteArray(), 0, baos.size());
2241        daos.writeLong(checksum.getValue());
2242        daos.flush();
2243
2244        try
2245        {
2246            checkCorrupt();
2247
2248            try
2249            {
2250                logControlFile = privRandomAccessFile(logControlFileName, "rw");
2251            }
2252            catch (IOException ioe)
2253            {
2254                logControlFile = null;
2255                return false;
2256            }
2257
2258            if (!privCanWrite(logControlFileName))
2259                return false;
2260
2261            if (SanityManager.DEBUG)
2262            {
2263                if (SanityManager.DEBUG_ON(TEST_LOG_FULL))
2264                    testLogFull();
2265            }
2266
2267            logControlFile.seek(0);
2268            logControlFile.write(baos.toByteArray());
2269            syncFile(logControlFile);
2270            logControlFile.close();
2271
2272            // write the same data to mirror control file
2273            try
2274            {
2275                logControlFile =
2276                    privRandomAccessFile(getMirrorControlFileName(), "rw");
2277            }
2278            catch (IOException ioe)
2279            {
2280                logControlFile = null;
2281                return false;
2282            }
2283
2284            logControlFile.seek(0);
2285            logControlFile.write(baos.toByteArray());
2286            syncFile(logControlFile);
2287
2288        }
2289        finally
2290        {
2291            if (logControlFile != null)
2292                logControlFile.close();
2293        }
2294
2295        return true;
2296
2297    }
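    /*
        A standalone sketch (not part of this class) of the safe-write pattern
        used by writeControlFile() above: build the payload, append a checksum
        trailer, then write the identical bytes to both a primary and a mirror
        file, syncing each. CRC32 and the method/file handling are illustrative
        assumptions; the real control file layout is the one written above.

        <PRE>
        import java.io.ByteArrayOutputStream;
        import java.io.DataOutputStream;
        import java.io.File;
        import java.io.FileOutputStream;
        import java.io.IOException;
        import java.util.zip.CRC32;

        public class DualControlFileWriteSketch
        {
            static void safeWrite(byte[] payload, File primary, File mirror)
                throws IOException
            {
                ByteArrayOutputStream baos = new ByteArrayOutputStream();
                DataOutputStream daos = new DataOutputStream(baos);
                daos.write(payload);
                CRC32 crc = new CRC32();
                crc.update(payload, 0, payload.length);
                daos.writeLong(crc.getValue());   // checksum trailer
                daos.flush();
                byte[] bytes = baos.toByteArray();

                for (File f : new File[] { primary, mirror })
                {
                    try (FileOutputStream out = new FileOutputStream(f))
                    {
                        out.write(bytes);
                        out.getFD().sync();       // make it durable before moving on
                    }
                }
            }
        }
        </PRE>
    */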
2298
2299    /*
2300        Carefully read the content of the control file.
2301
2302        <P> MT- read only
2303    */

2304    private long readControlFile(StorageFile logControlFileName, Properties startParams)
2305         throws IOException, StandardException
2306    {
2307        StorageRandomAccessFile logControlFile = null;
2308        ByteArrayInputStream bais = null;
2309        DataInputStream dais = null;
2310        logControlFile = privRandomAccessFile(logControlFileName, "r");
2311        boolean upgradeNeeded = false;
2312        long value = LogCounter.INVALID_LOG_INSTANT;
2313        long onDiskChecksum = 0;
2314        long controlFilelength = logControlFile.length();
2315        byte barray[] = null;
2316
2317        try
2318        {
2319            // The length of the file is less than the minimum in any version
2320            // It is possibly hosed; no point in reading data from this file.
2321            // Skip reading the checksum if the control file is from before 1.5.
2322            if (controlFilelength < 16)
2323                onDiskChecksum = -1;
2324            else if (controlFilelength == 16)
2325            {
2326                barray = new byte[16];
2327                logControlFile.readFully(barray);
2328            }else if (controlFilelength > 16)
2329            {
2330                barray = new byte[(int) logControlFile.length() - 8];
2331                logControlFile.readFully(barray);
2332                onDiskChecksum = logControlFile.readLong();
2333                if (onDiskChecksum !=0 )
2334                {
2335                    checksum.reset();
2336                    checksum.update(barray, 0, barray.length);
2337                }
2338            }
2339
2340            if ( onDiskChecksum == checksum.getValue() || onDiskChecksum ==0)
2341            {
2342
2343                bais = new ByteArrayInputStream(barray);
2344                dais = new DataInputStream(bais);
2345
2346                if (dais.readInt() != fid)
2347                {
2348                    throw StandardException.newException(
2349                            SQLState.LOG_INCOMPATIBLE_FORMAT, dataDirectory);
2350                }
2351    
2352                int obsoleteVersion = dais.readInt();
2353                value = dais.readLong();
2354    
2355                if (SanityManager.DEBUG)
2356                {
2357                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2358                        SanityManager.DEBUG(LogToFile.DBG_FLAG,
2359                            "log control file ckp instance = " +
2360                            LogCounter.toDebugString(value));
2361                }
2362    
2363    
2364                // from version 1.5 onward, we added an int for storing JBMS
2365                // version and an int for storing checkpoint interval
2366                // and log switch interval
2367                onDiskMajorVersion = dais.readInt();
2368                onDiskMinorVersion = dais.readInt();
2369                int dbBuildNumber = dais.readInt();
2370                int flags = dais.readByte();
2371                
2372                // check if the database was booted previously at any time with
2373                // derby.system.durability=test mode.
2374                // If yes, then on a boot error we report that this setting is
2375                // probably the cause of the error, and we also log a warning
2376                // in derby.log that this mode was set previously.
2377                wasDBInDurabilityTestModeNoSync =
2378                    (flags & IS_DURABILITY_TESTMODE_NO_SYNC_FLAG) != 0;
2379
2380                if (SanityManager.DEBUG) {
2381                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2382                        SanityManager.DEBUG(LogToFile.DBG_FLAG,
2383                        "log control file, was derby.system.durability set to test = " +
2384                        wasDBInDurabilityTestModeNoSync);
2385                }
2386                    
2387                
2388                onDiskBeta = (flags & IS_BETA_FLAG) != 0;
2389                if (onDiskBeta)
2390                {
2391                    // if it is beta, it can only be booted by exactly the same
2392                    // version
2393                    if (!jbmsVersion.isBeta() ||
2394                        onDiskMajorVersion != jbmsVersion.getMajorVersion() ||
2395                        onDiskMinorVersion != jbmsVersion.getMinorVersion())
2396                    {
2397                        boolean forceBetaUpgrade = false;
2398                        if (SanityManager.DEBUG)
2399                        {
2400                            // give ourselves an out for this beta check for debugging purposes
2401                            if (SanityManager.DEBUG_ON("forceBetaUpgrade"))
2402                            {
2403                                Monitor.logMessage("WARNING !! : forcing beta upgrade.");
2404                                forceBetaUpgrade =true;
2405                            }
2406                        }
2407
2408                        if (!forceBetaUpgrade)
2409                        {
2410                            throw StandardException.newException(
2411                                SQLState.LOG_CANNOT_UPGRADE_BETA,
2412                                dataDirectory,
2413                                ProductVersionHolder.simpleVersionString(onDiskMajorVersion, onDiskMinorVersion, onDiskBeta));
2414                        }
2415                    }
2416                }
2417                    
2418    
2419                // JBMS_VERSION must be numbered in a way so that it is ever
2420                // increasing. We are backwards compatible but not forwards
2421                // compatible.
2422                //
2423                if (onDiskMajorVersion > jbmsVersion.getMajorVersion() ||
2424                    (onDiskMajorVersion == jbmsVersion.getMajorVersion() &&
2425                     onDiskMinorVersion > jbmsVersion.getMinorVersion()))
2426                {
2427                    // don't need to worry about point release, no format
2428                    // upgrade is allowed.
2429                    throw StandardException.newException(
2430                            SQLState.LOG_INCOMPATIBLE_VERSION,
2431                            dataDirectory,
2432                            ProductVersionHolder.simpleVersionString(onDiskMajorVersion, onDiskMinorVersion, onDiskBeta));
2433                }
2434
2435                // Ensure that upgrade has been requested for a major or minor upgrade;
2436                // maintenance (point) versions should not require an upgrade.
2437                if ((onDiskMajorVersion != jbmsVersion.getMajorVersion()) ||
2438                    (onDiskMinorVersion != jbmsVersion.getMinorVersion()))
2439                {
2440                    upgradeNeeded = true;
2441                }
2442                // if the checksum is zero and the version is > 3.5, the file is hosed,
2443                // except in case of an upgrade from versions <= 3.5
2444                if (onDiskChecksum == 0 &&
2445                    (!(onDiskMajorVersion <= 3 && onDiskMinorVersion <=5) ||
2446                    onDiskMajorVersion == 0))
2447                    value = LogCounter.INVALID_LOG_INSTANT;
2448            }
2449        }
2450        finally
2451        {
2452            if (logControlFile != null)
2453                logControlFile.close();
2454            if (bais != null)
2455                bais.close();
2456            if (dais != null)
2457                dais.close();
2458        }
2459
2460        if (upgradeNeeded)
2461        {
2462            if (Monitor.isFullUpgrade(startParams,
2463                ProductVersionHolder.simpleVersionString(onDiskMajorVersion, onDiskMinorVersion, onDiskBeta))) {
2464
2465                onDiskMajorVersion = jbmsVersion.getMajorVersion();
2466                onDiskMinorVersion = jbmsVersion.getMinorVersion();
2467                onDiskBeta = jbmsVersion.isBeta();
2468
2469                // Write out the new log control file with the new
2470                // version; the database has been upgraded
2471
2472                if (!writeControlFile(logControlFileName, value))
2473                {
2474                    throw StandardException.newException(
2475                            SQLState.LOG_CONTROL_FILE, logControlFileName);
2476                }
2477            }
2478        }
2479
2480        return value;
2481
2482    }
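    /*
        The read-side counterpart of the sketch after writeControlFile(): verify
        the checksum trailer of the primary control file and fall back to the
        mirror if it does not match. Again a standalone illustration with
        assumed file names and CRC32, not the readControlFile() logic itself.

        <PRE>
        import java.io.DataInputStream;
        import java.io.File;
        import java.io.FileInputStream;
        import java.io.IOException;
        import java.util.zip.CRC32;

        public class DualControlFileReadSketch
        {
            static byte[] readVerified(File f) throws IOException
            {
                try (DataInputStream in = new DataInputStream(new FileInputStream(f)))
                {
                    byte[] payload = new byte[(int) f.length() - 8];
                    in.readFully(payload);
                    long stored = in.readLong();
                    CRC32 crc = new CRC32();
                    crc.update(payload, 0, payload.length);
                    if (crc.getValue() != stored)
                        throw new IOException("checksum mismatch in " + f);
                    return payload;
                }
            }

            static byte[] read(File primary, File mirror) throws IOException
            {
                try { return readVerified(primary); }
                catch (IOException damaged) { return readVerified(mirror); }
            }
        }
        </PRE>
    */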
2483
2484
2485
2486    /**
2487     * Create the directory where transaction log should go.
2488     * @exception StandardException Standard Error Policy
2489    */

2490    private void createLogDirectory() throws StandardException
2491    {
2492        StorageFile logDir =
2493            logStorageFactory.newStorageFile(LogFactory.LOG_DIRECTORY_NAME);
2494
2495        if (privExists(logDir)) {
2496            // make sure log directory is empty.
2497            String[] logfiles = privList(logDir);
2498            if (logfiles != null) {
2499                if(logfiles.length != 0) {
2500                    throw StandardException.newException(
2501                        SQLState.LOG_SEGMENT_EXIST, logDir.getPath());
2502                }
2503            }
2504            
2505        }else {
2506            // create the log directory.
2507            if (!privMkdirs(logDir)) {
2508                throw StandardException.newException(
2509                    SQLState.LOG_SEGMENT_NOT_EXIST, logDir.getPath());
2510            }
2511        }
2512    }
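    /*
        A minimal standalone sketch (not part of this class) of the same
        create-or-verify-empty check, using plain java.io.File instead of the
        StorageFactory and privileged helpers used above. Paths and the
        exception choice are assumptions for illustration only.

        <PRE>
        import java.io.File;
        import java.io.IOException;

        public class EnsureLogDirSketch
        {
            static void ensure(File logDir) throws IOException
            {
                if (logDir.exists())
                {
                    // an existing, non-empty log directory is an error
                    String[] files = logDir.list();
                    if (files != null && files.length != 0)
                        throw new IOException("log directory not empty: " + logDir);
                }
                else if (!logDir.mkdirs())
                {
                    throw new IOException("cannot create log directory: " + logDir);
                }
            }
        }
        </PRE>
    */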
2513
2514    /*
2515        Return the directory where the log should go.
2516
2517        <P> MT- read only
2518        @exception StandardException Cloudscape Standard Error Policy
2519    */

2520    public StorageFile getLogDirectory() throws StandardException
2521    {
2522        StorageFile logDir = null;
2523
2524        logDir = logStorageFactory.newStorageFile( LogFactory.LOG_DIRECTORY_NAME);
2525
2526        if (!privExists(logDir))
2527        {
2528            throw StandardException.newException(
2529                    SQLState.LOG_SEGMENT_NOT_EXIST, logDir.getPath());
2530        }
2531
2532        return logDir;
2533    }
2534
2535    public String JavaDoc getCanonicalLogPath()
2536    {
2537        if (logDevice == null)
2538            return null;
2539        else
2540        {
2541            try
2542            {
2543                return logStorageFactory.getCanonicalName();
2544            }
2545            catch (IOException ioe)
2546            {
2547                return null;
2548                // file not found
2549            }
2550        }
2551    }
2552
2553
2554    /**
2555        Return the control file name
2556
2557        <P> MT- read only
2558    */

2559    private StorageFile getControlFileName() throws StandardException
2560    {
2561        return logStorageFactory.newStorageFile( getLogDirectory(), "log.ctrl");
2562    }
2563
2564    /**
2565        Return the mirror control file name
2566
2567        <P> MT- read only
2568    */

2569    private StorageFile getMirrorControlFileName() throws StandardException
2570    {
2571        return logStorageFactory.newStorageFile( getLogDirectory(), "logmirror.ctrl");
2572    }
2573
2574    /**
2575        Given a log file number, return its file name
2576
2577        <P> MT- read only
2578    */

2579    private StorageFile getLogFileName(long filenumber) throws StandardException
2580    {
2581        return logStorageFactory.newStorageFile( getLogDirectory(), "log" + filenumber + ".dat");
2582    }
2583
2584    /*
2585        Find a checkpoint log record at the checkpointInstant
2586
2587        <P> MT- read only
2588    */

2589    private CheckpointOperation findCheckpoint(long checkpointInstant, FileLogger logger)
2590         throws IOException, StandardException, ClassNotFoundException
2591    {
2592        StreamLogScan scan = (StreamLogScan)
2593            openForwardsScan(checkpointInstant, (LogInstant)null);
2594
2595        // estimated size of a checkpoint log record, which contains 3 longs
2596        // and assorted other log record overhead
2597        Loggable lop = logger.readLogRecord(scan, 100);
2598                                
2599        scan.close();
2600
2601        if (lop instanceof CheckpointOperation)
2602            return (CheckpointOperation)lop;
2603        else
2604            return null;
2605    }
2606
2607
2608    /*
2609     * Functions to help the Logger open a log scan on the log.
2610     */

2611
2612    /**
2613        Scan backward from start position.
2614
2615        <P> MT- read only
2616
2617        @exception IOException cannot access the log
2618        @exception StandardException Standard Cloudscape error policy
2619    */

2620    protected LogScan openBackwardsScan(long startAt, LogInstant stopAt)
2621         throws IOException, StandardException
2622    {
2623        checkCorrupt();
2624
2625        // backward from end of log
2626        if (startAt == LogCounter.INVALID_LOG_INSTANT)
2627            return openBackwardsScan(stopAt);
2628
2629
2630        // ensure any buffered data is written to the actual file
2631        flushBuffer(LogCounter.getLogFileNumber(startAt),
2632                    LogCounter.getLogFilePosition(startAt));
2633
2634        return new Scan(this, startAt, stopAt, Scan.BACKWARD);
2635    }
2636
2637    /**
2638        Scan backward from end of log.
2639        <P> MT- read only
2640
2641        @exception IOException cannot access the log
2642        @exception StandardException Standard Cloudscape error policy
2643    */

2644    protected LogScan openBackwardsScan(LogInstant stopAt)
2645         throws IOException, StandardException
2646    {
2647        checkCorrupt();
2648
2649        // The current instant is the log instant of the next log record to be
2650        // written out, which is at the end of the log.
2651        // Ensure any buffered data is written to the actual file.
2652        long startAt;
2653        synchronized (this)
2654        {
2655            // flush the whole buffer to ensure the complete
2656            // end of log is in the file.
2657            logOut.flushLogAccessFile();
2658            startAt = currentInstant();
2659        }
2660
2661        return new Scan(this, startAt, stopAt, Scan.BACKWARD_FROM_LOG_END);
2662    }
2663
2664    /**
2665      @see LogFactory#openFlushedScan
2666      @exception StandardException Ooops.
2667     */

2668    public ScanHandle openFlushedScan(DatabaseInstant start,int groupsIWant)
2669         throws StandardException
2670    {
2671        return new FlushedScanHandle(this,start,groupsIWant);
2672    }
2673    
2674
2675
2676    /**
2677        Scan Forward from start position.
2678
2679        <P> MT- read only
2680
2681        @param startAt - if startAt == INVALID_LOG_INSTANT,
2682            start from the beginning of the log. Otherwise, start scan from startAt.
2683        @param stopAt - if not null, stop at this log instant (inclusive).
2684            Otherwise, stop at the end of the log
2685
2686        @exception IOException cannot access the log
2687        @exception StandardException Standard Cloudscape error policy
2688    */

2689    protected LogScan openForwardsScan(long startAt, LogInstant stopAt)
2690         throws IOException, StandardException
2691    {
2692        checkCorrupt();
2693
2694        if (startAt == LogCounter.INVALID_LOG_INSTANT)
2695        {
2696            startAt = firstLogInstant();
2697        }
2698
2699        // ensure any buffered data is written to the actual file
2700        if (stopAt != null) {
2701            LogCounter stopCounter = (LogCounter) stopAt;
2702            flushBuffer(stopCounter.getLogFileNumber(),
2703                        stopCounter.getLogFilePosition());
2704        } else {
2705            synchronized (this) {
2706                if (logOut != null)
2707                    // flush to the end of the log
2708                    logOut.flushLogAccessFile();
2709            }
2710        }
2711
2712        return new Scan(this, startAt, stopAt, Scan.FORWARD);
2713    }
2714
2715    /*
2716     * Methods to help a log scan switch from one log file to the next
2717     */

2718
2719    /**
2720        Open a log file and position the file at the beginning.
2721        Used by scan to switch to the next log file
2722
2723        <P> MT- read only
2724
2725        @exception StandardException Standard Cloudscape error policy
2726        @exception IOException cannot access the log at the new position.
2727    */

2728    protected StorageRandomAccessFile getLogFileAtBeginning(long filenumber)
2729         throws IOException, StandardException
2730    {
2731        long instant = LogCounter.makeLogInstantAsLong(filenumber,
2732                                                       LOG_FILE_HEADER_SIZE);
2733        return getLogFileAtPosition(instant);
2734    }
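    /*
        A log instant is a single long that carries both a log file number and a
        byte position within that file, which is why getLogFileAtBeginning() can
        build one from (filenumber, LOG_FILE_HEADER_SIZE). The sketch below is a
        standalone illustration of such packing; the 32/32 bit split is an
        assumption made here for clarity, and the real encoding is whatever
        LogCounter defines.

        <PRE>
        public class LogInstantSketch
        {
            static long make(long fileNumber, long filePosition)
            {
                return (fileNumber << 32) | filePosition;
            }

            static long fileNumber(long instant)   { return instant >>> 32; }
            static long filePosition(long instant) { return instant & 0xFFFFFFFFL; }

            public static void main(String[] args)
            {
                // file 3, just past the header (4 + 4 + 8 + 8 = 24 bytes) that
                // initLogFile() writes
                long instant = make(3, 24);
                System.out.println(fileNumber(instant));    // 3
                System.out.println(filePosition(instant));  // 24
            }
        }
        </PRE>
    */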
2735
2736
2737    /**
2738        Get a read-only handle to the log file positioned at the stated position
2739
2740        <P> MT- read only
2741
2742        @return null if file does not exist or of the wrong format
2743        @exception IOException cannot access the log at the new position.
2744        @exception StandardException Standard Cloudscape error policy
2745    */

2746    protected StorageRandomAccessFile getLogFileAtPosition(long logInstant)
2747         throws IOException, StandardException
2748    {
2749        checkCorrupt();
2750
2751        long filenum = LogCounter.getLogFileNumber(logInstant);
2752        long filepos = LogCounter.getLogFilePosition(logInstant);
2753
2754        StorageFile fileName = getLogFileName(filenum);
2755        if (!privExists(fileName))
2756        {
2757            if (SanityManager.DEBUG)
2758            {
2759                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2760                    SanityManager.DEBUG(LogToFile.DBG_FLAG, fileName.getPath() + " does not exist");
2761            }
2762
2763            return null;
2764        }
2765
2766
2767        StorageRandomAccessFile log = null;
2768
2769        try
2770        {
2771            log = privRandomAccessFile(fileName, "r");
2772
2773            // verify that the log file is of the right format
2774            if (!verifyLogFormat(log, filenum))
2775            {
2776                if (SanityManager.DEBUG)
2777                {
2778                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2779                        SanityManager.DEBUG(LogToFile.DBG_FLAG, fileName.getPath() + " format mismatch");
2780                }
2781
2782                log.close();
2783                log = null;
2784            }
2785            else
2786            {
2787                log.seek(filepos);
2788            }
2789        }
2790        catch (IOException ioe)
2791        {
2792            try
2793            {
2794                if (log != null)
2795                {
2796                    log.close();
2797                    log = null;
2798                }
2799
2800                if (SanityManager.DEBUG)
2801                {
2802                    SanityManager.THROWASSERT("cannot get to position " + filepos +
2803                                              " for log file " + fileName.getPath(), ioe);
2804                }
2805            }
2806            catch (IOException ioe2)
2807            {}
2808            throw ioe;
2809        }
2810
2811        return log;
2812
2813    }
2814
2815    /*
2816    ** Methods of ModuleControl
2817    */

2818
2819    public boolean canSupport(Properties startParams)
2820    {
2821        String runtimeLogAttributes = startParams.getProperty(LogFactory.RUNTIME_ATTRIBUTES);
2822        if (runtimeLogAttributes != null) {
2823            if (runtimeLogAttributes.equals(LogFactory.RT_READONLY))
2824                return false;
2825        }
2826
2827        return true;
2828    }
2829
2830
2831
2832
2833    /**
2834        Boot up the log factory.
2835        <P> MT- caller provide synchronization
2836
2837        @exception StandardException log factory cannot start up
2838    */

2839    public void boot(boolean create, Properties startParams) throws StandardException
2840    {
2841        dataDirectory = startParams.getProperty(PersistentService.ROOT);
2842        
2843        logDevice = startParams.getProperty(Attribute.LOG_DEVICE);
2844        if( logDevice != null)
2845        {
2846            // in case the user specifies logDevice in URL form
2847            String logDeviceURL = null;
2848            try {
2849                URL url = new URL(logDevice);
2850                logDeviceURL = url.getFile();
2851            } catch (MalformedURLException ex) {}
2852            if (logDeviceURL != null)
2853                logDevice = logDeviceURL;
2854        }
2855
2856
2857        if(create) {
2858            getLogStorageFactory();
2859            createLogDirectory();
2860            
2861        } else {
2862            // check if the database is being restored from the backup,
2863            // if it is then restore the logs.
2864            if (!restoreLogs(startParams)) {
2865                // set the log storage factory.
2866                getLogStorageFactory();
2867                if (logDevice != null)
2868                {
2869                    // Make sure we find the log, do not assume
2870                    // it is OK that the log is not there, because
2871                    // it could be a user typo (like when users edit
2872                    // service.properties to change the log device
2873                    // while restoring from backups using OS copy).
2874                    StorageFile checklogDir =
2875                        logStorageFactory.newStorageFile(
2876                                 LogFactory.LOG_DIRECTORY_NAME);
2877                    if (!privExists(checklogDir))
2878                    {
2879                        throw
2880                            StandardException.newException(
2881                            SQLState.LOG_FILE_NOT_FOUND, checklogDir.getPath());
2882
2883                    }
2884                }
2885            }
2886        }
2887                
2888        //if user does not set the right value for the log buffer size,
2889        //default value is used instead.
2890        logBufferSize = PropertyUtil.getSystemInt(org.apache.derby.iapi.reference.Property.LOG_BUFFER_SIZE,
2891                                                   LOG_BUFFER_SIZE_MIN,
2892                                                   LOG_BUFFER_SIZE_MAX,
2893                                                   DEFAULT_LOG_BUFFER_SIZE);
2894        jbmsVersion = Monitor.getMonitor().getEngineVersion();
2895
2896        
2897        String logArchiveMode =
2898            startParams.getProperty(Property.LOG_ARCHIVE_MODE);
2899        logArchived = Boolean.valueOf(logArchiveMode).booleanValue();
2900        
2901        //get log factory properties if any set in derby.properties
2902        getLogFactoryProperties(null);
2903
2904        /* check if the storage factory supports write sync(rws). If so, use it unless
2905         * derby.storage.fileSyncTransactionLog property is set true by user.
2906         */

2907
2908        if (logStorageFactory.supportsRws())
2909        {
2910            //write sync can be used in the jvm that database is running on.
2911            //disable write sync if derby.storage.fileSyncTransactionLog is true
2912            isWriteSynced =
2913                !(PropertyUtil.getSystemBoolean(Property.FILESYNC_TRANSACTION_LOG));
2914        }
2915        else
2916        {
2917            isWriteSynced = false;
2918        }
2919
2920
2921        // If derby.system.durability=test is set, then set flag to
2922        // disable sync of log records at commit and log file before
2923        // data page makes it to disk
2924        if (Property.DURABILITY_TESTMODE_NO_SYNC.equalsIgnoreCase(
2925               PropertyUtil.getSystemProperty(Property.DURABILITY_PROPERTY)))
2926        {
2927            // disable syncing of log.
2928            logNotSynced = true;
2929            //if log is not being synced, files shouldn't be open in write sync mode
2930            isWriteSynced = false;
2931        }
2932        else if (Performance.MEASURE)
2933        {
2934            // development build only feature, must by hand set the
2935            // Performance.MEASURE variable and rebuild. Useful during
2936            // development to compare/contrast effect of syncing, release
2937            // users can use the above relaxed durability option to disable
2938            // all syncing.
2939
2940            logNotSynced =
2941                PropertyUtil.getSystemBoolean(
2942                    Property.STORAGE_LOG_NOT_SYNCED);
2943
2944            if (logNotSynced)
2945            {
2946                isWriteSynced = false;
2947                Monitor.logMessage("Performance.logNotSynced = true");
2948            }
2949        }
2950
2951        // try to access the log
2952        // if it doesn't exist, create it.
2953        // if it does exist, run recovery
2954
2955        boolean createNewLog = create;
2956
2957        if (SanityManager.DEBUG)
2958            SanityManager.ASSERT(fid != -1, "invalid log format Id");
2959
2960        checkpointInstant = LogCounter.INVALID_LOG_INSTANT;
2961        try
2962        {
2963            StorageFile logControlFileName = getControlFileName();
2964
2965            StorageFile logFile;
2966
2967            if (!createNewLog)
2968            {
2969                if (privExists(logControlFileName))
2970                {
2971                    checkpointInstant =
2972                        readControlFile(logControlFileName, startParams);
2973
2974                    // in case system was running previously with
2975                    // derby.system.durability=test then print a message
2976                    // to the derby log
2977                    if (wasDBInDurabilityTestModeNoSync)
2978                    {
2979                        // print message stating that the database was
2980                        // previously at least at one time running with
2981                        // derby.system.durability=test mode
2982                        Monitor.logMessage(MessageService.getTextMessage(
2983                            MessageId.LOG_WAS_IN_DURABILITY_TESTMODE_NO_SYNC,
2984                            Property.DURABILITY_PROPERTY,
2985                            Property.DURABILITY_TESTMODE_NO_SYNC));
2986                    }
2987                        
2988                    if (checkpointInstant == LogCounter.INVALID_LOG_INSTANT &&
2989                                        privExists(getMirrorControlFileName()))
2990                    {
2991                        checkpointInstant =
2992                            readControlFile(
2993                                getMirrorControlFileName(), startParams);
2994                    }
2995
2996                }
2997                else if (logDevice != null)
2998                {
2999                    // Do not throw this error if logDevice is null because
3000                    // in a read only configuration, it is acceptable
3001                    // to not have a log directory. But clearly, if the
3002                    // logDevice property is set, then it should be there.
3003                    throw StandardException.newException(
3004                            SQLState.LOG_FILE_NOT_FOUND,
3005                            logControlFileName.getPath());
3006                }
3007
3008                if (checkpointInstant != LogCounter.INVALID_LOG_INSTANT)
3009                    logFileNumber = LogCounter.getLogFileNumber(checkpointInstant);
3010                else
3011                    logFileNumber = 1;
3012
3013                logFile = getLogFileName(logFileNumber);
3014
3015                // if log file is not there or if it is of the wrong format, create a
3016                // brand new log file and do not attempt to recover the database
3017
3018                if (!privExists(logFile))
3019                {
3020                    if (logDevice != null)
3021                    {
3022                        throw StandardException.newException(
3023                                SQLState.LOG_FILE_NOT_FOUND,
3024                                logControlFileName.getPath());
3025                    }
3026
3027                    logErrMsg(MessageService.getTextMessage(
3028                        MessageId.LOG_MAYBE_INCONSISTENT,
3029                        logFile.getPath()));
3030
3031                    createNewLog = true;
3032                }
3033                else if (!verifyLogFormat(logFile, logFileNumber))
3034                {
3035                    Monitor.logTextMessage(MessageId.LOG_DELETE_INCOMPATIBLE_FILE, logFile);
3036
3037                    // blow away the log file if possible
3038                    if (!privDelete(logFile) && logFileNumber == 1)
3039                    {
3040                        logErrMsgForDurabilityTestModeNoSync();
3041                        throw StandardException.newException(
3042                            SQLState.LOG_INCOMPATIBLE_FORMAT, dataDirectory);
3043                    }
3044
3045                    // If logFileNumber > 1, we are not going to write that
3046                    // file just yet. Just leave it be and carry on. Maybe
3047                    // when we get there it can be deleted.
3048
3049                    createNewLog = true;
3050                }
3051            }
3052
3053            if (createNewLog)
3054            {
3055                // brand new log. Start from log file number 1.
3056
3057                // create or overwrite the log control file with an invalid
3058                // checkpoint instant since there is no checkpoint yet
3059                if (writeControlFile(logControlFileName,
3060                                     LogCounter.INVALID_LOG_INSTANT))
3061                {
3062                    firstLogFileNumber = 1;
3063                    logFileNumber = 1;
3064                    if (SanityManager.DEBUG)
3065                    {
3066                        if (SanityManager.DEBUG_ON(TEST_MAX_LOGFILE_NUMBER))
3067                        {
3068                            // set the value to be two less than max possible
3069                            // log number, test case will perform some ops to
3070                            // hit the max number case.
3071                            firstLogFileNumber =
3072                                LogCounter.MAX_LOGFILE_NUMBER -2;
3073
3074                            logFileNumber = LogCounter.MAX_LOGFILE_NUMBER -2;
3075                        }
3076                    }
3077                    logFile = getLogFileName(logFileNumber);
3078
3079                    if (privExists(logFile))
3080                    {
3081                        // this log file may be there because the system may have
3082                        // crashed right after a log switch but did not write
3083                        // out any log record
3084                        Monitor.logTextMessage(
3085                            MessageId.LOG_DELETE_OLD_FILE, logFile);
3086
3087                        if (!privDelete(logFile))
3088                        {
3089                            logErrMsgForDurabilityTestModeNoSync();
3090                            throw StandardException.newException(
3091                                    SQLState.LOG_INCOMPATIBLE_FORMAT,
3092                                    dataDirectory);
3093                        }
3094                    }
3095
3096                    // don't need to try to delete it, we know it isn't there
3097                    firstLog = privRandomAccessFile(logFile, "rw");
3098
3099                    if (!initLogFile(firstLog, logFileNumber, LogCounter.INVALID_LOG_INSTANT))
3100                    {
3101                        throw StandardException.newException(
3102                            SQLState.LOG_SEGMENT_NOT_EXIST, logFile.getPath());
3103                    }
3104
3105                    endPosition = firstLog.getFilePointer();
3106                    lastFlush = firstLog.getFilePointer();
3107
3108                    //if write sync is true, preallocate the log file
3109                    //and reopen the file in rws mode.
3110                    if(isWriteSynced)
3111                    {
3112                        //extend the file by writing zeros to it
3113                        preAllocateNewLogFile(firstLog);
3114                        firstLog.close();
3115                        firstLog = openLogFileInWriteMode(logFile);
3116                        //position the log at the current log end position
3117                        firstLog.seek(endPosition);
3118                    }
3119
3120                    if (SanityManager.DEBUG)
3121                    {
3122                        SanityManager.ASSERT(
3123                            endPosition == LOG_FILE_HEADER_SIZE,
3124                            "empty log file has wrong size");
3125                    }
3126                }
3127                else
3128                {
3129                    // read only database
3130                    ReadOnlyDB = true;
3131                    logOut = null;
3132                    firstLog = null;
3133                }
3134
3135                recoveryNeeded = false;
3136            }
3137            else
3138            {
3139                // log file exists, need to run recovery
3140                recoveryNeeded = true;
3141            }
3142
3143        }
3144        catch (IOException ioe)
3145        {
3146            throw Monitor.exceptionStartingModule(ioe);
3147        }
3148            
3149        // The maximum log file number that can be created in Derby was increased from
3150        // 2^22 - 1 to 2^31 - 1 in version 10.1. But if the database is running on
3151        // engines 10.1 or above on a soft upgrade from versions 10.0 or
3152        // before, the max log file number that can be created is
3153        // still limited to 2^22 - 1, because users can revert back to older versions
3154        // which do not have logic to handle a log file number greater than
3155        // 2^22 - 1.
3156
3157        // set max possible log file number to derby 10.0 limit, if the database is not
3158        // fully upgraded to or created in version 10.1 or above.
3159        if (!checkVersion(RawStoreFactory.DERBY_STORE_MAJOR_VERSION_10,
3160                          RawStoreFactory.DERBY_STORE_MINOR_VERSION_1))
3161            maxLogFileNumber = LogCounter.DERBY_10_0_MAX_LOGFILE_NUMBER;
3162
3163    } // end of boot
3164

3165    private void getLogStorageFactory() throws StandardException
3166    {
3167        if( logDevice == null)
3168        {
3169            DataFactory df = (DataFactory) Monitor.findServiceModule( this, DataFactory.MODULE);
3170            logStorageFactory = (WritableStorageFactory) df.getStorageFactory();
3171        }
3172        else
3173        {
3174            try
3175            {
3176                PersistentService ps = Monitor.getMonitor().getServiceType(this);
3177                logStorageFactory = (WritableStorageFactory) ps.getStorageFactoryInstance( false, logDevice, null, null);
3178            }
3179            catch( IOException ioe)
3180            {
3181                if( SanityManager.DEBUG)
3182                    SanityManager.NOTREACHED();
3183                throw StandardException.newException( SQLState.LOG_FILE_NOT_FOUND, ioe, logDevice);
3184            }
3185        }
3186    } // end of getLogStorageFactory
3187

3188    /**
3189        Stop the log factory
3190        <P> MT- caller provide synchronization
3191        (RESOLVE: this should be called AFTER dataFactory and transFactory are
3192        stopped)
3193    */

3194    public void stop() {
3195
3196
3197        // stop our checkpoint
3198        if (checkpointDaemon != null) {
3199            checkpointDaemon.unsubscribe(myClientNumber);
3200            checkpointDaemon.stop();
3201        }
3202
3203        synchronized(this)
3204        {
3205            stopped = true;
3206
3207            if (logOut != null) {
3208                try {
3209                    logOut.flushLogAccessFile();
3210                    logOut.close();
3211                }
3212                catch (IOException ioe) {}
3213                catch(StandardException se){}
3214                logOut = null;
3215            }
3216        }
3217
3218      
3219        if (SanityManager.DEBUG &&
3220            Performance.MEASURE &&
3221            mon_LogSyncStatistics)
3222        {
3223            Monitor.logMessage("number of times someone waited = " +
3224                           mon_numLogFlushWaits +
3225                           "\nnumber of times flush is called = " +
3226                           mon_flushCalls +
3227                           "\nnumber of sync is called = " +
3228                           mon_syncCalls +
3229                           "\ntotal number of bytes written to log = " +
3230                           LogAccessFile.mon_numBytesToLog +
3231                           "\ntotal number of writes to log file = " +
3232                           LogAccessFile.mon_numWritesToLog);
3233        }
3234        
3235
3236        // delete obsolete log files, left around by earlier crashes
3237        if(corrupt == null && ! logArchived() && !keepAllLogs && !ReadOnlyDB)
3238            deleteObsoleteLogfiles();
3239
3240        if( logDevice != null)
3241            logStorageFactory.shutdown();
3242        logStorageFactory = null;
3243    }
3244
3245
3246
3247    /* delete the log files that might have been left around if we crashed
3248     * immediately after the checkpoint before truncations of logs completed.
3249     * see bug no: 3519, for more details.
3250     */

3251
3252    private void deleteObsoleteLogfiles(){
3253        StorageFile logDir;
3254        //find the first log file number that is useful
3255        long firstLogNeeded = getFirstLogNeeded(currentCheckpoint);
3256        if (firstLogNeeded == -1)
3257            return;
3258
3259        // when backup is in progress, log files that are yet to
3260        // be copied to the backup should not be deleted, even
3261        // if they are not required for crash recovery.
3262        if(backupInProgress) {
3263            long logFileNeededForBackup = logFileToBackup;
3264            // check if the log file that is yet to be copied
3265            // to the backup is older than the log file required
3266            // for crash recovery; if it is, then the first
3267            // log file that must not be deleted is the log file
3268            // that is yet to be copied to the backup.
3269            if (logFileNeededForBackup < firstLogNeeded)
3270                firstLogNeeded = logFileNeededForBackup;
3271        }
3272
3273        try{
3274            logDir = getLogDirectory();
3275        }catch (StandardException se)
3276        {
3277            if (SanityManager.DEBUG)
3278                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
3279                    SanityManager.DEBUG(DBG_FLAG, "error opening log segment dir");
3280            return;
3281        }
3282            
3283        String[] logfiles = privList(logDir);
3284        if (logfiles != null)
3285        {
3286            StorageFile uselessLogFile = null;
3287            long fileNumber;
3288            for(int i=0 ; i < logfiles.length; i++)
3289            {
3290                // delete the log files that are not needed any more
3291                if(logfiles[i].startsWith("log") && logfiles[i].endsWith(".dat"))
3292                {
3293                    fileNumber = Long.parseLong(logfiles[i].substring(3, (logfiles[i].length() -4)));
3294                    if(fileNumber < firstLogNeeded )
3295                    {
3296                        uselessLogFile = logStorageFactory.newStorageFile(logDir, logfiles[i]);
3297                        if (privDelete(uselessLogFile))
3298                        {
3299                            if (SanityManager.DEBUG)
3300                            {
3301                                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
3302                                    SanityManager.DEBUG(DBG_FLAG, "truncating obsolete log file " + uselessLogFile.getPath());
3303                            }
3304                        }
3305                        else
3306                        {
3307                            if (SanityManager.DEBUG)
3308                            {
3309                                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
3310                                    SanityManager.DEBUG(DBG_FLAG, "Fail to truncate obsolete log file " + uselessLogFile.getPath());
3311                            }
3312                        }
3313                    }
3314                }
3315            }
3316        }
3317    }
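    // Editor's note: illustrative sketch, not part of the original Derby source.
    // deleteObsoleteLogfiles() above relies on the log file naming convention
    // "log<fileNumber>.dat"; the file number is recovered with
    // substring(3, name.length() - 4). For a hypothetical file name:
    //
    //     String name = "log27.dat";
    //     long fileNumber =
    //         Long.parseLong(name.substring(3, name.length() - 4)); // 27
    //
    // Any such file whose number is below firstLogNeeded (and not still wanted
    // by an in-progress backup) is deleted.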
3318
3319    /*
3320     * Serviceable methods
3321     */

3322
3323    public boolean serviceASAP()
3324    {
3325        return false;
3326    }
3327
3328    // @return true, if this work needs to be done on a user thread immediately
3329    public boolean serviceImmediately()
3330    {
3331        return false;
3332    }
3333
3334
3335    public void getLogFactoryProperties(PersistentSet set)
3336         throws StandardException
3337    {
3338        String lsInterval;
3339        String cpInterval;
3340        if(set == null)
3341        {
3342            lsInterval=PropertyUtil.getSystemProperty(org.apache.derby.iapi.reference.Property.LOG_SWITCH_INTERVAL);
3343            cpInterval=PropertyUtil.getSystemProperty(org.apache.derby.iapi.reference.Property.CHECKPOINT_INTERVAL);
3344        }else
3345        {
3346            lsInterval = PropertyUtil.getServiceProperty(set, org.apache.derby.iapi.reference.Property.LOG_SWITCH_INTERVAL);
3347            cpInterval = PropertyUtil.getServiceProperty(set, org.apache.derby.iapi.reference.Property.CHECKPOINT_INTERVAL);
3348        }
3349
3350        /* log switch interval */
3351        if (lsInterval != null)
3352        {
3353            logSwitchInterval = Integer.parseInt(lsInterval);
3354                    // make sure checkpoint and log switch interval are within range
3355            if (logSwitchInterval < LOG_SWITCH_INTERVAL_MIN)
3356                logSwitchInterval = LOG_SWITCH_INTERVAL_MIN;
3357            else if (logSwitchInterval > LOG_SWITCH_INTERVAL_MAX)
3358                logSwitchInterval = LOG_SWITCH_INTERVAL_MAX;
3359        }
3360
3361        /* checkpoint interval */
3362        if (cpInterval != null)
3363        {
3364            checkpointInterval = Integer.parseInt(cpInterval);
3365            if (checkpointInterval < CHECKPOINT_INTERVAL_MIN)
3366                checkpointInterval = CHECKPOINT_INTERVAL_MIN;
3367            else if(checkpointInterval > CHECKPOINT_INTERVAL_MAX)
3368                checkpointInterval = CHECKPOINT_INTERVAL_MAX;
3369        }
3370    }
3371
3372    public int performWork(ContextManager context)
3373    {
3374        synchronized(this)
3375        {
3376            if (corrupt != null)
3377                return Serviceable.DONE; // don't do this again.
3378        }
3379
3380        // check to see if checkpointInterval and logSwitchInterval have changed
3381        AccessFactory af =
3382            (AccessFactory)Monitor.getServiceModule(this, AccessFactory.MODULE);
3383
3384        try
3385        {
3386            if (af != null)
3387            {
3388                TransactionController tc = null;
3389                try
3390                {
3391                    tc = af.getAndNameTransaction(
3392                            context, AccessFactoryGlobals.SYS_TRANS_NAME);
3393
3394                    getLogFactoryProperties(tc);
3395                }
3396                finally
3397                {
3398                    if (tc != null)
3399                        tc.commit();
3400                }
3401            }
3402
3403            // checkpoint will start its own internal transaction on the current
3404            // context.
3405            rawStoreFactory.checkpoint();
3406        }
3407        catch (StandardException se)
3408        {
3409            Monitor.logTextMessage(MessageId.LOG_CHECKPOINT_EXCEPTION);
3410            logErrMsg(se);
3411        }
3412        catch (ShutdownException shutdown)
3413        {
3414            // If we are shutting down, just ignore the error and let the
3415            // system go down without printing errors to the log.
3416        }
3417
3418        checkpointDaemonCalled = false;
3419
3420        return Serviceable.DONE;
3421    }
3422
3423
3424    /*
3425    ** Implementation specific methods
3426    */

3427
3428    /**
3429        Append length bytes of data to the log prepended by a long log instant
3430        and followed by 4 bytes of length information.
3431
3432        <P>
3433        This method is synchronized to ensure log records are added sequentially
3434        to the end of the log.
3435
3436        <P>MT- single threaded through this log factory. Log records are
3437        appended one at a time.
3438
3439        @exception StandardException Log Full.
3440
3441    */

3442    protected long appendLogRecord(byte[] data, int offset, int length,
3443            byte[] optionalData, int optionalDataOffset, int optionalDataLength)
3444         throws StandardException
3445    {
3446        long instant;
3447        boolean testIncompleteLogWrite = false;
3448
3449        if (ReadOnlyDB)
3450        {
3451            throw StandardException.newException(
3452                SQLState.LOG_READ_ONLY_DB_UPDATE);
3453        }
3454
3455        if (length <= 0)
3456        {
3457            throw StandardException.newException(
3458                    SQLState.LOG_ZERO_LENGTH_LOG_RECORD);
3459        }
3460
3461        // RESOLVE: calculate checksum here
3462        if (SanityManager.DEBUG)
3463        {
3464            if (SanityManager.DEBUG_ON(TEST_LOG_INCOMPLETE_LOG_WRITE))
3465            {
3466                /// /// /// /// /// /// /// /// /// ///
3467                //
3468                // go into this alternate route instead
3469                //
3470                /// /// /// /// /// /// /// /// /// ///
3471                return logtest_appendPartialLogRecord(data, offset, length,
3472                                                      optionalData,
3473                                                      optionalDataOffset,
3474                                                      optionalDataLength);
3475
3476            }
3477
3478        }
3479
3480        try
3481        {
3482            if (SanityManager.DEBUG)
3483            {
3484                if (SanityManager.DEBUG_ON(TEST_LOG_FULL))
3485                    testLogFull(); // if log is 'full' this routine will throw an
3486                                   // exception
3487            }
3488
3489            synchronized (this)
3490            {
3491                // has someone else found a problem in the raw store?
3492                if (corrupt != null)
3493                {
3494                    throw StandardException.newException(
3495                            SQLState.LOG_STORE_CORRUPT, corrupt);
3496                }
3497
3498                if (logOut == null)
3499                {
3500                    throw StandardException.newException(SQLState.LOG_NULL);
3501                }
3502
3503                /*
3504                 * NOTE!!
3505                 *
3506                 * subclass which logs special record to the stream depends on
3507                 * the EXACT byte sequence of the following segment of code.
3508                 * If you change this, not only will you need to write upgrade
3509                 * code for this class, you also need to find all the subclass
3510                 * which write log record to log stream directly to make sure
3511                 * they are OK
3512                 */

3513
3514                // see if the log file is too big, if it is, switch to the next
3515                // log file
3516                if ((endPosition + LOG_RECORD_OVERHEAD + length) >=
3517                    LogCounter.MAX_LOGFILE_SIZE)
3518                {
3519                    switchLogFile();
3520
3521                    // still too big?? Giant log record?
3522                    if ((endPosition + LOG_RECORD_OVERHEAD + length) >=
3523                        LogCounter.MAX_LOGFILE_SIZE)
3524                    {
3525                        throw StandardException.newException(
3526                                SQLState.LOG_EXCEED_MAX_LOG_FILE_SIZE,
3527                                new Long(logFileNumber),
3528                                new Long(endPosition),
3529                                new Long(length),
3530                                new Long(LogCounter.MAX_LOGFILE_SIZE));
3531                    }
3532                }
3533
3534                //reserve the space for the checksum log record
3535                endPosition += logOut.reserveSpaceForChecksum(length, logFileNumber, endPosition);
3536
3537                // don't call currentInstant since we are already in a
3538                // synchronized block
3539                instant =
3540                    LogCounter.makeLogInstantAsLong(logFileNumber, endPosition);
3541
3542                logOut.writeLogRecord(
3543                    length, instant, data, offset,
3544                    optionalData, optionalDataOffset, optionalDataLength);
3545
3546                if (optionalDataLength != 0)
3547                {
3548                    if (SanityManager.DEBUG)
3549                    {
3550                        if (optionalData == null)
3551                            SanityManager.THROWASSERT(
3552                            "optionalDataLength = " + optionalDataLength +
3553                            " with null Optional data");
3554
3555                        if (optionalData.length <
3556                                             (optionalDataOffset+optionalDataLength))
3557                            SanityManager.THROWASSERT(
3558                            "optionalDataLength = " + optionalDataLength +
3559                            " optionalDataOffset = " + optionalDataOffset +
3560                            " optionalData.length = " + optionalData.length);
3561                    }
3562                }
3563
3564                endPosition += (length + LOG_RECORD_OVERHEAD);
3565            }
3566        }
3567        catch (IOException ioe)
3568        {
3569            throw markCorrupt(StandardException.newException(
3570                    SQLState.LOG_FULL, ioe));
3571        }
3572
3573        return instant;
3574    }
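    // Editor's note: hedged sketch, not part of the original Derby source.
    // Based on the writer calls visible in this class (writeLogRecord above and
    // logtest_appendPartialLogRecord below), a log record appears to be framed
    // on disk roughly as:
    //
    //     int    length      // number of payload bytes
    //     long   instant     // log instant assigned to this record
    //     byte[] payload     // 'length' bytes: data plus any optional data
    //     ...                // remaining framing counted by LOG_RECORD_OVERHEAD
    //
    // which is why endPosition is advanced by (length + LOG_RECORD_OVERHEAD)
    // after each append.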
3575
3576    /*
3577     * Misc private functions to access the log
3578     */

3579
3580    /**
3581        Get the current log instant - this is the log instant of the Next log
3582        record to be written out
3583        <P> MT - This method is synchronized to ensure that it always points to
3584        the end of a log record, not the middle of one.
3585    */

3586    protected synchronized long currentInstant()
3587    {
3588        return LogCounter.makeLogInstantAsLong(logFileNumber, endPosition);
3589    }
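    // Editor's note: hedged sketch, not part of the original Derby source.
    // A log instant is a single long that packs the log file number together
    // with the byte position inside that file; LogCounter provides the
    // pack/unpack helpers used throughout this class. Assuming LogCounter's
    // static accessors (as used elsewhere in this file):
    //
    //     long instant = LogCounter.makeLogInstantAsLong(logFileNumber, endPosition);
    //     long file    = LogCounter.getLogFileNumber(instant);   // == logFileNumber
    //     long offset  = LogCounter.getLogFilePosition(instant); // == endPosition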
3590
3591    protected synchronized long endPosition()
3592    {
3593        return endPosition;
3594    }
3595
3596    /**
3597        Return the current log file number.
3598
3599        <P> MT - this method is synchronized so that
3600        it is not in the middle of being changed by switchLogFile
3601    */

3602    private synchronized long getLogFileNumber()
3603    {
3604        return logFileNumber;
3605    }
3606
3607    /**
3608        Get the first valid log instant - this is the beginning of the first
3609        log file
3610
3611        <P>MT- synchronized on this
3612    */

3613    private synchronized long firstLogInstant()
3614    {
3615        return LogCounter.makeLogInstantAsLong(firstLogFileNumber, LOG_FILE_HEADER_SIZE);
3616    }
3617
3618    /**
3619        Flush the log such that the log record written with the instant
3620        wherePosition is guaranteed to be on disk.
3621
3622        <P>MT - only one flush is allowed to be taking place at any given time
3623        (RESOLVE: right now it single thread thru the log factory while the log
3624        is frozen)
3625
3626        @exception StandardException cannot sync log file
3627
3628    */

3629    protected void flush(long fileNumber, long wherePosition) throws StandardException
3630    {
3631
3632        long potentialLastFlush = 0;
3633
3634        synchronized (this)
3635        {
3636            if (Performance.MEASURE)
3637                mon_flushCalls++;
3638            try
3639            {
3640                boolean waited;
3641                do
3642                {
3643                    // THIS CORRUPT CHECK MUST BE FIRST, before any check that
3644                    // sees if the log has already been flushed to this
3645                    // point. This is based upon the assumption that every
3646                    // dirty page in the cache must call flush() before it is
3647                    // written out. Has someone else found a problem in the
3648                    // raw store?
3649
3650                    if (corrupt != null)
3651                    {
3652                        throw StandardException.newException(
3653                                SQLState.LOG_STORE_CORRUPT, corrupt);
3654                    }
3655
3656                    // now check if database is frozen
3657                    while (isFrozen)
3658                    {
3659                        try
3660                        {
3661                            wait();
3662                        }
3663                        catch (InterruptedException ie)
3664                        {
3665                            throw StandardException.interrupt(ie);
3666                        }
3667                    }
3668
3669                    // if we are just testing to see whether the database is
3670                    // frozen or corrupt (wherePosition == INVALID_LOG_INSTANT)
3671                    // then we can return now.
3672                    // if the log file is already flushed up to where we are
3673                    // interested in, just return.
3674                    if (wherePosition == LogCounter.INVALID_LOG_INSTANT ||
3675                        fileNumber < logFileNumber ||
3676                        wherePosition < lastFlush)
3677                    {
3678                        return;
3679                    }
3680
3681                    // if we are not corrupt and we are in the middle of redo,
3682                    // we know the log record has already been flushed since we
3683                    // haven't written any log yet.
3684                    if (recoveryNeeded && inRedo)
3685                    {
3686                        return;
3687                    }
3688
3689
3690                    if (SanityManager.DEBUG)
3691                    {
3692                        if (fileNumber > getLogFileNumber())
3693                            SanityManager.THROWASSERT(
3694                              "trying to flush a file that is not there yet " +
3695                                 fileNumber + " " + logFileNumber);
3696
3697                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
3698                        {
3699                            SanityManager.DEBUG(
3700                                DBG_FLAG, "Flush log to " + wherePosition);
3701                        }
3702                    }
3703
3704                    // There could be multiple threads that want to flush the
3705                    // log file; see if I can be the one.
3706                    if (logBeingFlushed)
3707                    {
3708                        waited = true;
3709                        try
3710                        {
3711                            if (Performance.MEASURE)
3712                                mon_numLogFlushWaits++;
3713                            wait(); // release log semaphore to let non-flushing
3714                                    // threads log stuff while all the flushing
3715                                    // threads wait.
3716
3717                            // now we continue back to see if the sync
3718                            // we waited for, flushed the portion
3719                            // of log we are interested in.
3720                        }
3721                        catch (InterruptedException ie)
3722                        {
3723                            throw StandardException.interrupt(ie);
3724                        }
3725                    }
3726                    else
3727                    {
3728                        waited = false;
3729
3730                        // logBeingFlushed is false, I am flushing the log now.
3731                        if(!isWriteSynced)
3732                        {
3733                            // Flush any data from the buffered log
3734                            logOut.flushLogAccessFile();
3735                        }else
3736                        {
3737                            //add active buffers to dirty buffer list
3738                            //to flush to the disk.
3739                            logOut.switchLogBuffer();
3740                        }
3741
3742                        potentialLastFlush = endPosition; // we will flush to the end
3743
3744                        // once logBeingFlushed is set, need to release
3745                        // the logBeingFlushed flag in finally block.
3746                        logBeingFlushed = true;
3747                    }
3748
3749                } while (waited) ;
3750                // if I have waited, go through the do loop again - hopefully,
3751                // someone else has already flushed it for me.
3752            }
3753            catch (IOException ioe)
3754            {
3755                throw markCorrupt(StandardException.newException(
3756                    SQLState.LOG_CANNOT_FLUSH,
3757                    ioe,
3758                    getLogFileName(logFileNumber).getPath()));
3759            }
3760        } // unfreeze log manager to accept more log records
3761

3762        boolean syncSuceed = false;
3763        try
3764        {
3765            if (SanityManager.DEBUG)
3766            {
3767                SanityManager.ASSERT(logBeingFlushed,
3768                                     "flushing log without logBeingFlushed set");
3769                SanityManager.ASSERT(potentialLastFlush > 0,
3770                                     "potentialLastFlush not set");
3771
3772                if (SanityManager.DEBUG_ON(TEST_LOG_FULL))
3773                    testLogFull();
3774
3775                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
3776                    SanityManager.DEBUG(DBG_FLAG, "Begin log sync...");
3777            }
3778
3779            
3780            if (Performance.MEASURE)
3781                mon_syncCalls++;
3782
3783            if (isWriteSynced)
3784            {
3785                //LogAccessFile.flushDirtyBuffers() will allow only one write
3786                //sync at a time, flush requests will get queued
3787                logOut.flushDirtyBuffers();
3788            }
3789            else
3790            {
3791                if (!logNotSynced)
3792                    logOut.syncLogAccessFile();
3793            }
3794
3795            syncSuceed = true;
3796
3797            if (SanityManager.DEBUG)
3798            {
3799                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
3800                    SanityManager.DEBUG(DBG_FLAG, "end log sync.");
3801            }
3802        }
3803        catch (SyncFailedException sfe)
3804        {
3805            throw markCorrupt(StandardException.newException(
3806                SQLState.LOG_CANNOT_FLUSH,
3807                sfe,
3808                getLogFileName(logFileNumber).getPath()));
3809        }
3810        catch (IOException ioe)
3811        {
3812            throw markCorrupt(StandardException.newException(
3813                SQLState.LOG_CANNOT_FLUSH,
3814                ioe,
3815                getLogFileName(logFileNumber).getPath()));
3816        }
3817        finally
3818        {
3819            synchronized(this)
3820            {
3821                logBeingFlushed = false; // done flushing
3822
3823                // update lastFlush under synchronized(this) instead of synchronized(logOut)
3824                if (syncSuceed)
3825                {
3826                    lastFlush = potentialLastFlush;
3827                }
3828
3829
3830                // We may actually have flushed more than that because someone
3831                // may have done a logOut.flushBuffer right before the sync
3832                // call. But this is guaranteed to be flushed.
3833                notifyAll();
3834            }
3835        }
3836
3837        
3838        // get checkpoint Daemon to work
3839        if ((logWrittenFromLastCheckPoint + potentialLastFlush) > checkpointInterval &&
3840                    checkpointDaemon != null && !checkpointDaemonCalled && !inLogSwitch)
3841        {
3842            // following synchronized block is required to make
3843            // sure only one checkpoint request gets scheduled.
3844            synchronized(this)
3845            {
3846                // recheck if checkpoint is still required, it is possible some other
3847                // thread might have already scheduled a checkpoint and completed it.
3848                if ((logWrittenFromLastCheckPoint + potentialLastFlush) > checkpointInterval &&
3849                    checkpointDaemon != null && !checkpointDaemonCalled && !inLogSwitch)
3850                {
3851                    checkpointDaemonCalled = true;
3852                    checkpointDaemon.serviceNow(myClientNumber);
3853                }
3854            }
3855
3856        }else
3857        {
3858            // switch the log if required; this case will occur
3859            // if the log switch interval is less than the checkpoint interval,
3860            // otherwise the checkpoint daemon would be doing log switches along
3861            // with the checkpoints.
3862            if (potentialLastFlush > logSwitchInterval &&
3863                !checkpointDaemonCalled && !inLogSwitch)
3864            {
3865                // following synchronized block is required to make sure only
3866                // one thread switches the log file at a time.
3867                synchronized(this)
3868                {
3869                    // recheck if log switch is still required, it is possible some other
3870                    // thread might have already switched the log file.
3871                    if (potentialLastFlush > logSwitchInterval &&
3872                        !checkpointDaemonCalled && !inLogSwitch)
3873                    {
3874                        inLogSwitch = true;
3875                        switchLogFile();
3876                    }
3877                }
3878            }
3879        }
3880    }
3881
3882    /**
3883     * Utility routine to call sync() on the input file descriptor.
3884     * <p>
3885    */

3886    private void syncFile( StorageRandomAccessFile raf)
3887        throws StandardException
3888    {
3889        for( int i=0; ; )
3890        {
3891            // 3311: JVM sync call sometimes fails under high load against NFS
3892            // mounted disk. We re-try to do this 20 times.
3893            try
3894            {
3895                raf.sync( false);
3896
3897                // the sync succeeded, so return
3898                break;
3899            }
3900            catch (IOException ioe)
3901            {
3902                i++;
3903                try
3904                {
3905                    // wait for .2 of a second, hopefully I/O is done by now
3906                    // we wait a max of 4 seconds before we give up
3907                    Thread.sleep(200);
3908                }
3909                catch( InterruptedException ie )
3910                {
3911                    //does not matter whether I get interrupted or not
3912                }
3913
3914                if( i > 20 )
3915                {
3916                    throw StandardException.newException(
3917                                SQLState.LOG_FULL, ioe);
3918                }
3919            }
3920        }
3921    }
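    // Editor's note: illustration, not part of the original Derby source.
    // The retry loop above gives up after roughly 20 attempts with a 200 ms
    // sleep between them, i.e. about 20 * 200 ms = 4 seconds of waiting,
    // which matches the "max of 4 seconds" comment before Thread.sleep(200).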
3922
3923
3924    /**
3925      Open a forward scan of the transaction log.
3926
3927      <P> MT- read only
3928      @exception StandardException Standard cloudscape exception policy
3929    */

3930    public LogScan openForwardsFlushedScan(LogInstant startAt)
3931         throws StandardException
3932    {
3933        checkCorrupt();
3934
3935        // no need to flush the buffer as it's a flushed scan
3936

3937        return new FlushedScan(this,((LogCounter)startAt).getValueAsLong());
3938    }
3939
3940
3941    /**
3942      Get a forwards scan
3943
3944      @exception StandardException Standard Cloudscape error policy
3945      */

3946    public LogScan openForwardsScan(LogInstant startAt,LogInstant stopAt)
3947         throws StandardException
3948    {
3949        try
3950        {
3951            long startLong;
3952        
3953            if (startAt == null)
3954                startLong = LogCounter.INVALID_LOG_INSTANT;
3955            else
3956                startLong = ((LogCounter)startAt).getValueAsLong();
3957
3958            return openForwardsScan(startLong, stopAt);
3959        }
3960
3961        catch (IOException ioe)
3962        {
3963            throw markCorrupt(StandardException.newException(
3964                                        SQLState.LOG_IO_ERROR, ioe));
3965        }
3966
3967    }
3968
3969    public final boolean databaseEncrypted()
3970    {
3971        return databaseEncrypted;
3972    }
3973
3974
3975    /*
3976     * Set that the database is encrypted; all the transaction log has
3977     * to be encrypted, and flush the log if requested. Log needs to
3978     * be flushed first, if this is being set during (re) encryption
3979     * of an existing database.
3980     *
3981     * @param flushLog true, if log needs to be flushed,
3982     * otherwise false.
3983     */

3984    public void setDatabaseEncrypted(boolean flushLog)
3985        throws StandardException
3986    {
3987        if (flushLog)
3988            flushAll();
3989        databaseEncrypted = true;
3990    }
3991
3992
3993    /*
3994     * set up a new log file to start writing
3995     * the log records into the new log file
3996     * after this call.
3997     *
3998     * <P>MT - synchronization provided by caller - RawStore boot,
3999     * This method is called while re-encrypting the database
4000     * at database boot time.
4001     */

4002    public void startNewLogFile() throws StandardException
4003    {
4004        // switch the database to a new log file.
4005        switchLogFile();
4006    }
4007
4008
4009    /*
4010     * find if the checkpoint is in the last log file.
4011     *
4012     * <P>MT - synchronization provided by caller - RawStore boot,
4013     * This method is called only if a crash occurred while
4014     * re-encrypting the database at boot time.
4015     * @return <code> true </code> if the checkpoint is
4016     * in the last log file, otherwise
4017     * <code> false </code>.
4018     */

4019    public boolean isCheckpointInLastLogFile()
4020        throws StandardException
4021    {
4022        // check if the checkpoint is done in the last log file.
4023        long logFileNumberAfterCheckpoint =
4024            LogCounter.getLogFileNumber(checkpointInstant) + 1;
4025
4026        // check if there is a log file after
4027        // the log file that has the last
4028        // checkpoint record.
4029        StorageFile logFileAfterCheckpoint =
4030            getLogFileName(logFileNumberAfterCheckpoint);
4031        // System.out.println("checking " + logFileAfterCheckpoint);
4032        if (privExists(logFileAfterCheckpoint))
4033            return false;
4034        else
4035            return true;
4036    }
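    // Editor's note: illustration, not part of the original Derby source.
    // For example, if checkpointInstant points into log file 7, the checkpoint
    // is "in the last log file" exactly when the next log file (log8.dat, given
    // the log<number>.dat naming used above) does not exist yet.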
4037    
4038    /*
4039     * delete the log file after the checkpoint.
4040     *
4041     * <P>MT - synchronization provided by caller - RawStore boot,
4042     * This method is called only if a crash occurred while
4043     * re-encrypting the database at boot time.
4044     */

4045    public void deleteLogFileAfterCheckpointLogFile()
4046        throws StandardException
4047    {
4048        long logFileNumberAfterCheckpoint =
4049            LogCounter.getLogFileNumber(checkpointInstant) + 1;
4050
4051        StorageFile logFileAfterCheckpoint =
4052            getLogFileName(logFileNumberAfterCheckpoint);
4053
4054        // System.out.println("deleting " + logFileAfterCheckpoint);
4055

4056        if (privExists(logFileAfterCheckpoint))
4057        {
4058            // delete the log file (this must have been encrypted
4059            // with the new key).
4060            if (!privDelete(logFileAfterCheckpoint))
4061            {
4062                // throw exception, recovery cannot be performed
4063                // without deleting the log file encrypted with the new key.
4064                throw StandardException.newException(
4065                           SQLState.UNABLE_TO_DELETE_FILE,
4066                           logFileAfterCheckpoint);
4067            }
4068        }
4069    }
4070
4071
4072    /**
4073        @see RawStoreFactory#encrypt
4074        @exception StandardException Standard Cloudscape Error Policy
4075     */

4076    public int encrypt(byte[] cleartext, int offset, int length,
4077                          byte[] ciphertext, int outputOffset)
4078         throws StandardException
4079    {
4080        return rawStoreFactory.encrypt(cleartext, offset, length,
4081                                       ciphertext, outputOffset, false);
4082    }
4083
4084    /**
4085        @see RawStoreFactory#decrypt
4086        @exception StandardException Standard Cloudscape Error Policy
4087     */

4088    public int decrypt(byte[] ciphertext, int offset, int length,
4089                             byte[] cleartext, int outputOffset)
4090         throws StandardException
4091    {
4092        return rawStoreFactory.decrypt(ciphertext, offset, length, cleartext, outputOffset);
4093    }
4094
4095    /**
4096        return the encryption block size used during encrypted db creation
4097     */

4098    public int getEncryptionBlockSize()
4099    {
4100        return rawStoreFactory.getEncryptionBlockSize();
4101    }
4102
4103    /**
4104       returns the length that will make the data to be multiple of encryption
4105       block size based on the given length. Block cipher algorithms like DES
4106       and Blowfish, etc., require their input to be an exact multiple of the block size.
4107    */

4108    public int getEncryptedDataLength(int length)
4109    {
4110        if ((length % getEncryptionBlockSize()) != 0)
4111        {
4112            return length + getEncryptionBlockSize() - (length % getEncryptionBlockSize());
4113        }
4114
4115        return length;
4116    }
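    // Editor's note: illustration, not part of the original Derby source.
    // getEncryptedDataLength() simply rounds a length up to the next multiple
    // of the encryption block size. A worked example, assuming a hypothetical
    // 8-byte block cipher:
    //
    //     getEncryptedDataLength(13) == 13 + 8 - (13 % 8) == 16
    //     getEncryptedDataLength(16) == 16                      // already aligned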
4117
4118
4119
4120    /**
4121      Get the instant of the first record which was not
4122      flushed.
4123
4124      <P>This only works after running recovery the first time.
4125      <P>MT - RESOLVE:
4126      */

4127    public synchronized LogInstant getFirstUnflushedInstant()
4128    {
4129        if (SanityManager.DEBUG)
4130            SanityManager.ASSERT(logFileNumber > 0 && lastFlush > 0);
4131
4132        return new LogCounter(logFileNumber,lastFlush);
4133    }
4134
4135
4136    /**
4137     * Backup restore - stop sending log record to the log stream
4138     * @exception StandardException Standard Cloudscape error policy
4139     */

4140    public void freezePersistentStore() throws StandardException
4141    {
4142        // if I get into this synchronized block, I know I am not in the middle
4143        // of a write because writing to the log file is synchronized under this.
4144        synchronized(this)
4145        {
4146            isFrozen = true;
4147        }
4148    }
4149
4150    /**
4151     * Backup restore - start sending log record to the log stream
4152     * @exception StandardException Standard Cloudscape error policy
4153     */

4154    public void unfreezePersistentStore() throws StandardException
4155    {
4156        synchronized(this)
4157        {
4158            isFrozen = false;
4159            notifyAll();
4160        }
4161    }
4162
4163    /**
4164     * Backup restore - is the log being archived to some directory?
4165     * if log archive mode is enabled return true else false
4166     */

4167    public boolean logArchived()
4168    {
4169        return logArchived;
4170    }
4171
4172    /**
4173       Check to see if a database has been upgraded to the required
4174       level in order to use a store feature.
4175       @param requiredMajorVersion required database Engine major version
4176       @param requiredMinorVersion required database Engine minor version
4177       @return True if the database has been upgraded to the required level, false otherwise.
4178    **/

4179    boolean checkVersion(int requiredMajorVersion, int requiredMinorVersion)
4180    {
4181        if(onDiskMajorVersion > requiredMajorVersion )
4182        {
4183            return true;
4184        }
4185        else
4186        {
4187            if(onDiskMajorVersion == requiredMajorVersion &&
4188               onDiskMinorVersion >= requiredMinorVersion)
4189                return true;
4190        }
4191        
4192        return false;
4193    }
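    // Editor's note: illustration, not part of the original Derby source.
    // boot() above uses this check to keep a soft-upgraded database usable by
    // older engines, capping the maximum log file number:
    //
    //     if (!checkVersion(RawStoreFactory.DERBY_STORE_MAJOR_VERSION_10,
    //                       RawStoreFactory.DERBY_STORE_MINOR_VERSION_1))
    //         maxLogFileNumber = LogCounter.DERBY_10_0_MAX_LOGFILE_NUMBER;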
4194
4195
4196    /**
4197     * Check to see if a database has been upgraded to the required
4198     * level in order to use a store feature.
4199     *
4200     * @param requiredMajorVersion required database Engine major version
4201     * @param requiredMinorVersion required database Engine minor version
4202     * @param feature Non-null to throw an exception, null to return the
4203     * state of the version match.
4204     * @return <code> true </code> if the database has been upgraded to
4205     * the required level, <code> false </code> otherwise.
4206     * @exception StandardException
4207     * if the database is not at the require version
4208     * when <code>feature</code> feature is
4209     * not <code> null </code>.
4210     */

4211    public boolean checkVersion(int requiredMajorVersion,
4212                                int requiredMinorVersion,
4213                                String feature) throws StandardException
4214    {
4215        
4216        boolean isRequiredVersion =
4217            checkVersion(requiredMajorVersion, requiredMinorVersion);
4218
4219        // if the database is not at the required version, throw exception
4220        // if the feature is non-null.
4221        if (!isRequiredVersion && feature != null)
4222        {
4223            throw StandardException.newException(
4224                  SQLState.LANG_STATEMENT_UPGRADE_REQUIRED, feature,
4225                  ProductVersionHolder.simpleVersionString(onDiskMajorVersion,
4226                                                           onDiskMinorVersion,
4227                                                           onDiskBeta),
4228                  ProductVersionHolder.simpleVersionString(requiredMajorVersion,
4229                                                           requiredMinorVersion,
4230                                                           false));
4231        }
4232
4233        return isRequiredVersion;
4234    }
4235
4236
4237    /*
4238    ** Sending information to the user without throwing exception.
4239    ** There are times when unusual external or system related things happen in
4240    ** the log which the user may be interested in but which doesn't impede on
4241    ** the normal operation of the store. When such an event occur, just send
4242    ** a message or a warning message to the user rather than throw an
4243    ** exception, which will rollback a user transaction or crash the database.
4244    **
4245    ** logErrMsg - sends a warning message to the user
4246    */

4247
4248
4249    /**
4250        Print error message to user about the log
4251        MT - not needed, informational only
4252    */

4253    protected void logErrMsg(String msg)
4254    {
4255        logErrMsgForDurabilityTestModeNoSync();
4256        Monitor.logTextMessage(MessageId.LOG_BEGIN_ERROR);
4257        Monitor.logMessage(msg);
4258        Monitor.logTextMessage(MessageId.LOG_END_ERROR);
4259    }
4260
4261    /**
4262        Print error message to user about the log
4263        MT - not needed, informational only
4264    */

4265    protected void logErrMsg(Throwable t)
4266    {
4267        logErrMsgForDurabilityTestModeNoSync();
4268        if (corrupt != null)
4269        {
4270            Monitor.logTextMessage(MessageId.LOG_BEGIN_CORRUPT_STACK);
4271            printErrorStack(corrupt);
4272            Monitor.logTextMessage(MessageId.LOG_END_CORRUPT_STACK);
4273        }
4274
4275        if (t != corrupt)
4276        {
4277            Monitor.logTextMessage(MessageId.LOG_BEGIN_ERROR_STACK);
4278            printErrorStack(t);
4279            Monitor.logTextMessage(MessageId.LOG_END_ERROR_STACK);
4280        }
4281    }
4282
4283
4284    /**
4285     * In case of boot errors, and if the database is either booted
4286     * with derby.system.durability=test or was previously at any time booted in
4287     * this mode, mention in the error message that the error is probably
4288     * because the derby.system.durability was set.
4289     * Don't want to waste time resolving issues in such
4290     * cases
4291     * <p>
4292     * MT - not needed, informational only
4293     */

4294    private void logErrMsgForDurabilityTestModeNoSync()
4295    {
4296        if (logNotSynced || wasDBInDurabilityTestModeNoSync)
4297        {
4298            Monitor.logTextMessage(
4299                MessageId.LOG_DURABILITY_TESTMODE_NO_SYNC_ERR,
4300                Property.DURABILITY_PROPERTY,
4301                Property.DURABILITY_TESTMODE_NO_SYNC);
4302        }
4303    }
4304
4305    /**
4306     * print stack trace from the Throwable including
4307     * its nested exceptions
4308     * @param t trace starts from this error
4309     */

4310    private void printErrorStack(Throwable t)
4311    {
4312        ErrorStringBuilder esb =
4313            new ErrorStringBuilder(Monitor.getStream().getHeader());
4314        esb.stackTrace(t);
4315        Monitor.logMessage(esb.get().toString());
4316        esb.reset();
4317    }
4318
4319
4320    /**
4321     * Testing support
4322     */

4323    /**
4324        Writes out a partial log record - takes the appendLogRecord.
4325        Need to shutdown the database before another log record gets written,
4326        or the database is not recoverable.
4327    */

4328    private long logtest_appendPartialLogRecord(byte[] data, int offset,
4329                                                int length,
4330                                                byte[] optionalData,
4331                                                int optionalDataOffset,
4332                                                int optionalDataLength)
4333        throws StandardException
4334    {
4335        if (SanityManager.DEBUG)
4336        {
4337            int bytesToWrite = 1;
4338
4339            String TestPartialLogWrite = PropertyUtil.getSystemProperty(TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES);
4340            if (TestPartialLogWrite != null)
4341            {
4342                bytesToWrite = Integer.valueOf(TestPartialLogWrite).intValue();
4343            }
4344
4345            Monitor.logMessage("TEST_LOG_INCOMPLETE_LOG_WRITE: writing " + bytesToWrite +
4346                   " bytes out of " + length + " + " + LOG_RECORD_OVERHEAD + " log record");
4347
4348            long instant;
4349            try
4350            {
4351                                
4352                synchronized (this)
4353                {
4354                    // reserve the space for the checksum log record
4355                    // NOTE: bytesToWrite includes the log record overhead.
4356                    endPosition +=
4357                        logOut.reserveSpaceForChecksum(((length + LOG_RECORD_OVERHEAD)
4358                                                        < bytesToWrite ? length :
4359                                                        (bytesToWrite - LOG_RECORD_OVERHEAD)),
4360                                                       logFileNumber,endPosition);
4361                    instant = currentInstant();
4362
4363                    //check if the length of the records to be written is
4364                    //actually smaller than the number of bytesToWrite
4365                    if(length + LOG_RECORD_OVERHEAD < bytesToWrite)
4366                        endPosition += (length + LOG_RECORD_OVERHEAD);
4367                    else
4368                        endPosition += bytesToWrite;
4369
4370                    while(true) // so we can break out without returning out of
4371                                // sync block...
4372                        {
4373                            if (bytesToWrite < 4)
4374                            {
4375                                int shift = 3;
4376                                while(bytesToWrite-- > 0)
4377                                {
4378                                    logOut.write((byte)((length >>> 8*shift) & 0xFF));
4379                                    shift--;
4380                                }
4381                                break;
4382                            }
4383
4384                            // the length before the log record
4385                            logOut.writeInt(length);
4386                            bytesToWrite -= 4;
4387
4388                            if (bytesToWrite < 8)
4389                            {
4390                                int shift = 7;
4391                                while(bytesToWrite-- > 0)
4392                                {
4393                                    logOut.write((byte)((instant >>> 8*shift) & 0xFF));
4394                                    shift--;
4395                                }
4396                                break;
4397                            }
4398
4399                            // the log instant
4400                            logOut.writeLong(instant);
4401                            bytesToWrite -= 8;
4402
4403                            if (bytesToWrite < length)
4404                            {
4405                                int dataLength = length - optionalDataLength;
4406                                if(bytesToWrite < dataLength)
4407                                    logOut.write(data, offset,bytesToWrite);
4408                                else
4409                                {
4410                                    logOut.write(data, offset, dataLength);
4411                                    bytesToWrite -= dataLength ;
4412                                    if(optionalDataLength != 0 && bytesToWrite > 0)
4413                                        logOut.write(optionalData, optionalDataOffset, bytesToWrite);
4414                                }
4415                                break;
4416                            }
4417
4418                            // the log data
4419                            logOut.write(data, offset, length - optionalDataLength);
4420                            //write optional data
4421                            if(optionalDataLength != 0)
4422                                logOut.write(optionalData, optionalDataOffset, optionalDataLength);
4423
4424                            bytesToWrite -= length;
4425
4426                            if (bytesToWrite < 4)
4427                            {
4428                                int shift = 3;
4429                                while(bytesToWrite-- > 0)
4430                                {
4431                                    logOut.write((byte)((length >>> 8*shift) & 0xFF));
4432                                    shift--;
4433                                }
4434                                break;
4435                            }
4436
4437                            // the length after the log record
4438                            logOut.writeInt(length);
4439                            break;
4440
4441                        }
4442
4443                        // do make sure the partial write gets on disk by sync'ing it
4444                        flush(logFileNumber, endPosition);
4445
4446                    }
4447
4448
4449            }
4450            catch (IOException ioe)
4451            {
4452                throw StandardException.newException(SQLState.LOG_FULL, ioe);
4453            }
4454
4455            return instant;
4456        }
4457        return 0;
4458    }
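
    /*
     * A small illustrative sketch (not used by the class) of the byte
     * accounting the test method above walks through: a complete record on
     * disk is a 4 byte length, an 8 byte instant, the record data (including
     * any optional data) and a trailing 4 byte length. Given a partial-write
     * budget, the helper below computes how many of those bytes would
     * actually reach the log, mirroring the break points in the loop above.
     */
    private static int examplePartialWriteByteCount(int length, int bytesToWrite)
    {
        int budget  = bytesToWrite;
        int written = 0;
        int take;

        take = Math.min(4, budget);      // leading length field
        written += take; budget -= take;

        take = Math.min(8, budget);      // log instant
        written += take; budget -= take;

        take = Math.min(length, budget); // log data plus optional data
        written += take; budget -= take;

        written += Math.min(4, budget);  // trailing length field
        return written;
    }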
4459
4460    /**
4461        Simulate a log full condition
4462
4463        if TEST_LOG_FULL is set to true, then the property
4464        TEST_RECORD_TO_FILL_LOG indicates the number of times this function is
4465        called before an IOException simulating a log full condition is raised.
4466
4467        If TEST_RECORD_TO_FILL_LOG is not set, it defaults to 100 log records.
4468    */

4469    protected void testLogFull() throws IOException
4470    {
4471        if (SanityManager.DEBUG)
4472        {
4473            if (test_numRecordToFillLog < 0)
4474            {
4475                String RecordToFillLog = PropertyUtil.getSystemProperty(TEST_RECORD_TO_FILL_LOG);
4476                if (RecordToFillLog != null)
4477                    test_numRecordToFillLog = Integer.valueOf(RecordToFillLog).intValue();
4478                else
4479                    test_numRecordToFillLog = 100;
4480            }
4481
4482            if (++test_logWritten > test_numRecordToFillLog)
4483                throw new IOException("TestLogFull " + test_numRecordToFillLog +
4484                                      " written " + test_logWritten);
4485
4486        }
4487    }
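
    /*
     * A minimal sketch of how a debug build might arm the simulated log-full
     * condition documented above. The record count is arbitrary; note that,
     * per the comment above, the TEST_LOG_FULL debug flag must also be
     * enabled elsewhere for testLogFull() to be consulted at all.
     */
    private static void exampleArmSimulatedLogFull()
    {
        if (SanityManager.DEBUG)
        {
            // after 5 appended log records, testLogFull() raises its IOException
            System.setProperty(TEST_RECORD_TO_FILL_LOG, "5");
        }
    }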
4488
4489    /**
4490     * Get the log file to Simulate a log corruption
4491     * FOR UNIT TESTING USAGE ONLY
4492    */

4493    public StorageRandomAccessFile getLogFileToSimulateCorruption(long filenum) throws IOException, StandardException
4494    {
4495        if (SanityManager.DEBUG)
4496        {
4497            //long filenum = LogCounter.getLogFileNumber(logInstant);
4498            // long filepos = LogCounter.getLogFilePosition(logInstant);
4499            StorageFile fileName = getLogFileName(filenum);
4500            StorageRandomAccessFile log = null;
4501            return privRandomAccessFile(fileName, "rw");
4502        }
4503        
4504        return null;
4505
4506    }
4507    
4508
4509    /*********************************************************************
4510     * Log Testing
4511     *
4512     * Implementations may use these strings to simulate error conditions for
4513     * testing purposes.
4514     *
4515     *********************************************************************/

4516
4517    /**
4518      Set to true if we want the checkpoint to only switch the log but not
4519      actually do the checkpoint
4520    */

4521    public static final String TEST_LOG_SWITCH_LOG = SanityManager.DEBUG ? "TEST_LOG_SWITCH_LOG" : null;
4522
4523    /**
4524      Set to true if we want the upcoming log record to be only partially
4525      written. The database is corrupted if not immediately shut down.
4526      Set TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES to the number of bytes to write
4527      out; the default is 1 byte.
4528    */
4529    public static final String TEST_LOG_INCOMPLETE_LOG_WRITE = SanityManager.DEBUG ? "TEST_LOG_INCOMPLETE_LOG_WRITE" : null;
4530
4531    /**
4532      Set to the number of bytes we want the next log record to actually write
4533      out, only used when TEST_LOG_INCOMPLETE_LOG_WRITE is on. Default is 1
4534      byte.
4535    */

4536    public static final String TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES = SanityManager.DEBUG ? "derbyTesting.unittest.partialLogWrite" : null;
4537
4538    /**
4539      Set to true if we want to simulate a log full condition
4540    */

4541    public static final String TEST_LOG_FULL =
4542        SanityManager.DEBUG ? "TEST_LOG_FULL" : null;
4543
4544    /**
4545      Set to true if we want to simulate a log full condition while switching log
4546    */

4547    public static final String TEST_SWITCH_LOG_FAIL1 =
4548        SanityManager.DEBUG ? "TEST_SWITCH_LOG_FAIL1" : null;
4549    public static final String TEST_SWITCH_LOG_FAIL2 =
4550        SanityManager.DEBUG ? "TEST_SWITCH_LOG_FAIL2" : null;
4551
4552
4553    /**
4554      Set to the number of log records we want to write before the log is
4555      simulated to be full.
4556    */
4557    public static final String TEST_RECORD_TO_FILL_LOG =
4558        SanityManager.DEBUG ? "derbyTesting.unittest.recordToFillLog" : null;
4559
4560    /**
4561     * Set to true if we want to simulate that the maximum possible log file
4562     * number is being used.
4563    */
4564    public static final String TEST_MAX_LOGFILE_NUMBER =
4565        SanityManager.DEBUG ? "testMaxLogFileNumber" : null;
4566
4567    
4568    //enable the log archive mode
4569    public void enableLogArchiveMode() throws StandardException
4570    {
4571
4572        //if the log archive mode is already enabled, there is nothing to do
4573        if(!logArchived)
4574        {
4575            logArchived = true;
4576            AccessFactory af =
4577            (AccessFactory)Monitor.getServiceModule(this, AccessFactory.MODULE);
4578
4579            if (af != null)
4580            {
4581                TransactionController tc = null;
4582                tc = af.getTransaction(
4583                        ContextService.getFactory().getCurrentContextManager());
4584                tc.setProperty(Property.LOG_ARCHIVE_MODE , "true", true);
4585            }
4586        }
4587    }
4588
4589    // disable the log archive mode
4590    public void disableLogArchiveMode() throws StandardException
4591    {
4592        AccessFactory af =
4593            (AccessFactory)Monitor.getServiceModule(this, AccessFactory.MODULE);
4594        if (af != null)
4595        {
4596            TransactionController tc = null;
4597            tc = af.getTransaction(ContextService.getFactory().getCurrentContextManager());
4598            tc.setProperty(Property.LOG_ARCHIVE_MODE , "false", true);
4599        }
4600        logArchived = false;
4601    }
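
    /*
     * A sketch of how the two methods above are normally reached from SQL,
     * assuming the standard SYSCS_UTIL system procedures; the backup path
     * and connection are illustrative only.
     */
    private static void exampleToggleLogArchiveMode(java.sql.Connection conn)
        throws java.sql.SQLException
    {
        // take a backup and turn log archiving on (enableLogArchiveMode)
        java.sql.CallableStatement cs = conn.prepareCall(
            "CALL SYSCS_UTIL.SYSCS_BACKUP_DATABASE_AND_ENABLE_LOG_ARCHIVE_MODE(?, ?)");
        cs.setString(1, "/backup/mydb");
        cs.setInt(2, 0);    // 0 = keep previously archived log files
        cs.execute();
        cs.close();

        // later: turn log archiving off again (disableLogArchiveMode)
        cs = conn.prepareCall("CALL SYSCS_UTIL.SYSCS_DISABLE_LOG_ARCHIVE_MODE(?)");
        cs.setInt(1, 1);    // 1 = delete the archived log files
        cs.execute();
        cs.close();
    }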
4602
4603    //delete the online archived log files
4604    public void deleteOnlineArchivedLogFiles()
4605    {
4606        deleteObsoleteLogfiles();
4607    }
4608
4609
4610    /*
4611     * Start the transaction log backup.
4612     *
4613     * The transaction log is required to bring the database to the consistent
4614     * state on restore.
4615     *
4616     * All the log files that are created after the backup starts
4617     * must be kept around until they are copied into the backup,
4618     * even if there are checkpoints when backup is in progress.
4619     *
4620     * Copy the log control files to the backup (the checkpoint recorded in the
4621     * control files is the backup checkpoint). Restore will use the checkpoint
4622     * info in these control files to perform recovery to bring
4623     * the database to the consistent state.
4624     *
4625     * Find first log file that needs to be copied into the backup to bring
4626     * the database to the consistent state on restore.
4627     *
4628     * In the end, existing log files that are needed to recover from the backup
4629     * checkpoint are copied into the backup; any log files generated after
4630     * this call are also copied into the backup after all the information
4631     * in the data containers is written to the backup, when endLogBackup()
4632     * is called.
4633     *
4634     * @param toDir - location where the log files should be copied to.
4635     * @exception StandardException Standard Derby error policy
4636     *
4637     */

4638    public void startLogBackup(File toDir) throws StandardException
4639    {
4640        
4641        // synchronization is necessary to make sure NO parallel
4642        // checkpoint happens when the current checkpoint information
4643        // is being copied to the backup.
4644
4645        synchronized(this)
4646        {
4647            // wait until the thread that is doing the checkpoint completes it.
4648            while(inCheckpoint)
4649            {
4650                try
4651                {
4652                    wait();
4653                }
4654                catch (InterruptedException ie)
4655                {
4656                    throw StandardException.interrupt(ie);
4657                }
4658            }
4659        
4660            backupInProgress = true;
4661        
4662            // copy the control files.
4663            StorageFile fromFile;
4664            File toFile;
4665            // copy the log control file
4666            fromFile = getControlFileName();
4667            toFile = new File(toDir,fromFile.getName());
4668            if(!privCopyFile(fromFile, toFile))
4669            {
4670                throw StandardException.newException(
4671                    SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile, toFile);
4672            }
4673
4674            // copy the log mirror control file
4675            fromFile = getMirrorControlFileName();
4676            toFile = new File(toDir,fromFile.getName());
4677            if(!privCopyFile(fromFile, toFile))
4678            {
4679                throw StandardException.newException(
4680                    SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile, toFile);
4681            }
4682
4683            // find the first log file number that is active
4684            logFileToBackup = getFirstLogNeeded(currentCheckpoint);
4685        }
4686
4687        // copy all the log files that have to go into the backup
4688        backupLogFiles(toDir, getLogFileNumber() - 1);
4689    }
4690
4691    /*
4692     * copy the log files into the given backup location
4693     *
4694     * @param toDir - location to copy the log files to
4695     * @param lastLogFileToBackup - last log file that needs to be copied.
4696     **/

4697    private void backupLogFiles(File toDir, long lastLogFileToBackup)
4698        throws StandardException
4699    {
4700
4701        while(logFileToBackup <= lastLogFileToBackup)
4702        {
4703            StorageFile fromFile = getLogFileName(logFileToBackup);
4704            File toFile = new File(toDir, fromFile.getName());
4705            if(!privCopyFile(fromFile, toFile))
4706            {
4707                throw StandardException.newException(
4708                    SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile, toFile);
4709            }
4710            logFileToBackup++;
4711        }
4712    }
4713
4714    /*
4715     * copy all the log files that have to go into the backup
4716     * and mark that the backup is completed.
4717     *
4718     * @param toDir - location where the log files should be copied to.
4719     * @exception StandardException Standard Derby error policy
4720     */

4721    public void endLogBackup(File toDir) throws StandardException
4722    {
4723        long lastLogFileToBackup;
4724
4725
4726        // Make sure all log records are synced to disk. The online backup
4727        // copied data "through" the cache, so it may have picked up dirty pages
4728        // which have not yet synced the associated log records to disk.
4729        // Without this force, the backup may end up with page versions
4730        // in the backup without their associated log records.
4731        flush(logFileNumber, endPosition);
4732
4733        if (logArchived)
4734        {
4735            // when the log is being archived for roll-forward recovery
4736            // we would like to switch to a new log file.
4737            // Otherwise, during restore, a log file in the backup could
4738            // overwrite the more up-to-date log files in the
4739            // online log path. We also would like to write the end
4740            // marker for the log file; otherwise during roll-forward recovery,
4741            // if we see a log file with a fuzzy end, we think that is the
4742            // end of the recovery.
4743            switchLogFile();
4744            lastLogFileToBackup = getLogFileNumber() - 1;
4745        }
4746        else
4747        {
4748            // for a plain online backup a partially filled log file is ok,
4749            // no need to do a log switch.
4750            lastLogFileToBackup = getLogFileNumber();
4751        }
4752
4753        // backup all the log that got generated after the backup started.
4754        backupLogFiles(toDir, lastLogFileToBackup);
4755
4756        // mark that backup is completed.
4757        backupInProgress = false;
4758    }
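
    /*
     * The caller-side view of the sequence above is a single system
     * procedure call; SYSCS_UTIL.SYSCS_BACKUP_DATABASE drives
     * startLogBackup(), the data file copy and endLogBackup() internally.
     * A sketch, assuming the standard procedure name; the path is
     * illustrative only.
     */
    private static void exampleOnlineBackup(java.sql.Connection conn)
        throws java.sql.SQLException
    {
        java.sql.CallableStatement cs =
            conn.prepareCall("CALL SYSCS_UTIL.SYSCS_BACKUP_DATABASE(?)");
        cs.setString(1, "/backup/mydb");
        cs.execute();
        cs.close();
    }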
4759
4760
4761    /*
4762     * backup is not in progress any more, it failed for some reason.
4763     **/

4764    public void abortLogBackup()
4765    {
4766        backupInProgress = false;
4767    }
4768
4769
4770    // Is the transaction in rollforward recovery
4771    public boolean inRFR()
4772    {
4773        /*
4774         *The logging system does not differentiate between
4775         *crash recovery and rollforward recovery,
4776         *except in the case of a rollforward attempt on
4777         *read-only databases to check for pending transactions.
4778         *(See the comments in the recovery() function.)
4779         */

4780
4781        if(recoveryNeeded)
4782        {
4783            boolean readOnly = false;
4784            try{
4785                readOnly = !privCanWrite(getControlFileName());
4786            }catch(StandardException se)
4787            {
4788                //Exception should never have come here
4789                //because getControlFileName() is called
4790                //earlier at boot time; if there were problems
4791                //it should have shown up earlier.
4792                //We just ignore this error and hope that
4793                //the dataFactory has marked it as read only if that is the case.
4794            }
4795
4796            readOnly = readOnly || (dataFactory == null ? false :dataFactory.isReadOnly());
4797            return !readOnly;
4798        }else{
4799            return false;
4800        }
4801    }
4802
4803    /**
4804     * redo a checkpoint during rollforward recovery
4805    */

4806    public void checkpointInRFR(LogInstant cinstant, long redoLWM, DataFactory df) throws StandardException
4807    {
4808        //sync the data
4809        df.checkpoint();
4810
4811        //write the log control file; this will make sure that restart of the
4812        //rollforward recovery will start from this log instant next time instead of
4813        //from the beginning.
4814        try{
4815            if (!writeControlFile(getControlFileName(), ((LogCounter)cinstant).getValueAsLong()))
4816            {
4817                throw StandardException.newException(
4818                                                 SQLState.LOG_CONTROL_FILE, getControlFileName());
4819            }
4820        }
4821        catch (IOException ioe)
4822        {
4823            throw markCorrupt(
4824                    StandardException.newException(SQLState.LOG_IO_ERROR, ioe));
4825        }
4826        //remove the stub files
4827        df.removeDroppedContainerFileStubs(new LogCounter(redoLWM));
4828        
4829    }
4830
4831
4832    /**
4833     *
4834     * This function restores logs based on the following attributes
4835     * specified on the connection URL:
4836     * Attribute.CREATE_FROM (create the database from a backup if it does not exist)
4837     * Attribute.RESTORE_FROM (delete the whole database if it exists and then
4838     * restore it from the backup)
4839     * Attribute.ROLL_FORWARD_RECOVERY_FROM (perform rollforward recovery;
4840     * except for the log directory, everything else is replaced by the copy from
4841     * the backup. Log files in the backup are copied to the existing online log
4842     * directory.)
4843     *
4844     * In the RESTORE_FROM case the whole database directory is
4845     * removed in Directory.java while restoring service.properties,
4846     * so even the log directory is removed.
4847     * In the CREATE_FROM case the log directory will not exist if
4848     * we got this far, because the boot should fail if a database already exists.
4849     * In the ROLL_FORWARD_RECOVERY_FROM case the log directory should not be removed.
4850     * So the only thing that needs to be done here is to create
4851     * a log directory if it does not exist and copy the
4852     * log files (including control files) that exist in the backup from which
4853     * we are trying to restore the database into the online log directory.
4854     */

4855    private boolean restoreLogs(Properties JavaDoc properties) throws StandardException
4856    {
4857
4858        String backupPath = null;
4859        boolean isCreateFrom = false;
4860        boolean isRestoreFrom = false;
4861
4862        //check if the user requested for restore/recovery/create from backup
4863        backupPath = properties.getProperty(Attribute.CREATE_FROM);
4864        if (backupPath != null) {
4865            isCreateFrom = true;
4866        } else {
4867            backupPath = properties.getProperty(Attribute.RESTORE_FROM);
4868            if (backupPath != null) {
4869                isRestoreFrom = true;
4870            } else {
4871                backupPath = properties.getProperty(
4872                                  Attribute.ROLL_FORWARD_RECOVERY_FROM);
4873                // if the backup is not NULL then it is a rollforward recovery.
4874            }
4875        }
4876
4877        if(backupPath !=null)
4878        {
4879            if(!isCreateFrom){
4880                if(logDevice == null){
4881                    /**
4882                     * In restoreFrom/rollForwardRecoveryFrom mode when no
4883                     * logDevice on URL then the log is restored to the same
4884                     * location where the log was when backup was taken.
4885                     * In createFrom mode behaviour is same as when create=true,
4886                     * i.e unless user specifies the logDevice on URL, log will
4887                     * be copied to the database home dir.
4888                     * Note: LOG_DEVICE_AT_BACKUP will get set if log is not in
4889                     * default location(db home).
4890                     */

4891                    logDevice =
4892                        properties.getProperty(Property.LOG_DEVICE_AT_BACKUP);
4893                }
4894            }
4895        
4896            getLogStorageFactory();
4897            StorageFile logDir;
4898            logDir = logStorageFactory.newStorageFile(
4899                             LogFactory.LOG_DIRECTORY_NAME);
4900                
4901            //remove the log directory in case of restoreFrom
4902            //if it exists; this happens if the log device is in a separate
4903            //location from the db home.
4904            if (isRestoreFrom && logDevice != null)
4905            {
4906                if(!privRemoveDirectory(logDir))
4907                {
4908                    //it may be just a file, try deleting it
4909                    if(!privDelete(logDir))
4910                    {
4911                        throw StandardException.newException(
4912                            SQLState.UNABLE_TO_REMOVE_DATA_DIRECTORY,
4913                            getLogDirPath( logDir));
4914                    }
4915                }
4916            }
4917
4918            // if it is a create/restore from backup,
4919            // create the log directory.
4920            if (isCreateFrom || isRestoreFrom) {
4921                createLogDirectory();
4922            }
4923
4924            File backupLogDir = new File(backupPath, LogFactory.LOG_DIRECTORY_NAME);
4925            String[] logfilelist = privList(backupLogDir);
4926            if(logfilelist !=null)
4927            {
4928                for (int i = 0; i < logfilelist.length; i++)
4929                {
4930                    File blogFile = new File(backupLogDir, logfilelist[i]);
4931                    StorageFile clogFile = logStorageFactory.newStorageFile(logDir, logfilelist[i]);
4932                    if(!privCopyFile(blogFile , clogFile))
4933                    {
4934                        throw
4935                            StandardException.newException(SQLState.UNABLE_TO_COPY_LOG_FILE, blogFile, clogFile);
4936                    }
4937                }
4938            }else
4939            {
4940                throw StandardException.newException(SQLState.LOG_DIRECTORY_NOT_FOUND_IN_BACKUP,backupLogDir);
4941            }
4942            //we need to switch the log file after redo while
4943            //doing recovery from backups, otherwise we will
4944            //be replacing the updated log after a restore with
4945            // a log in the backup on the next restore.
4946            logSwitchRequired = true;
4947
4948            // log is restored from backup.
4949            return true;
4950        } else {
4951            // log is not restored from backup.
4952            return false;
4953        }
4954    }
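
    /*
     * A sketch of the connection URLs that lead into restoreLogs(), using
     * the createFrom/restoreFrom/rollForwardRecoveryFrom attributes the
     * comment above describes; database name and backup path are
     * illustrative only.
     */
    private static java.sql.Connection exampleRestoreFromBackup()
        throws java.sql.SQLException
    {
        // delete the existing database (if any) and restore it from the backup
        return java.sql.DriverManager.getConnection(
                "jdbc:derby:mydb;restoreFrom=/backup/mydb");

        // other variants, one attribute per boot attempt:
        //   jdbc:derby:mydb;createFrom=/backup/mydb
        //   jdbc:derby:mydb;rollForwardRecoveryFrom=/backup/mydb
    }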
4955
4956    /*preallocate the given log File to the logSwitchInterval size;
4957     *file is extended by writing zeros after the header till
4958     *the log file size set by the user.
4959     */

4960    private void preAllocateNewLogFile(StorageRandomAccessFile log) throws IOException, StandardException
4961    {
4962        //preallocate a file by writing zeros into it.
4963

4964        if (SanityManager.DEBUG)
4965        {
4966            int currentPosition = (int)log.getFilePointer();
4967            SanityManager.ASSERT(currentPosition == LOG_FILE_HEADER_SIZE,
4968                                 "New Log File Is not Correctly Initialized");
4969        }
4970
4971        int amountToWrite = logSwitchInterval - LOG_FILE_HEADER_SIZE ;
4972        int bufferSize = logBufferSize * 2;
4973        byte[] emptyBuffer = new byte[bufferSize];
4974        int nWrites = amountToWrite/bufferSize;
4975        int remainingBytes = amountToWrite % bufferSize;
4976        
4977        try{
4978            while(nWrites-- > 0)
4979                log.write(emptyBuffer);
4980
4981            if(remainingBytes !=0)
4982                log.write(emptyBuffer , 0 ,remainingBytes);
4983
4984            //sync the file
4985            syncFile(log);
4986        }catch(IOException ie)
4987        {
4988            //ignore io exceptions during preallocations
4989            //because this is more for performance improvements;
4990            //the system should work fine even without preallocations.
4991
4992            //RESOLVE: If the exception is because of no
4993            //space, it might be a good idea to trigger a checkpoint.
4994
4995            //In debug mode, throw the exception
4996            if (SanityManager.DEBUG)
4997            {
4998                throw ie;
4999            }
5000        }
5001    } // end of preAllocateNewLogFile
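
    /*
     * A tiny sketch (not used by the class) of the preallocation arithmetic
     * above: how many full zero-filled buffers and how many trailing bytes
     * are written for a given switch interval. The parameters stand in for
     * logSwitchInterval, LOG_FILE_HEADER_SIZE and logBufferSize * 2.
     */
    private static int[] examplePreallocationPlan(int switchInterval,
                                                  int headerSize,
                                                  int bufferSize)
    {
        int amountToWrite    = switchInterval - headerSize;
        int fullBufferWrites = amountToWrite / bufferSize;  // whole buffers of zeros
        int remainingBytes   = amountToWrite % bufferSize;  // one short final write
        return new int[] { fullBufferWrites, remainingBytes };
    }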
5002

5003
5004    /**
5005     * open the given log file name for writes; if the file cannot
5006     * be opened in write sync mode then disable the write sync mode and
5007     * open the file in "rw" mode.
5008     */

5009    private StorageRandomAccessFile openLogFileInWriteMode(StorageFile logFile) throws IOException
5010    {
5011        StorageRandomAccessFile log;
5012        try{
5013            log = privRandomAccessFile(logFile, "rws");
5014        }catch(FileNotFoundException ex)
5015        {
5016            // Normally this exception should never occur. For some reason
5017            // currently on Mac JVM 1.4.2 a FileNotFoundException is
5018            // thrown if a file is opened in "rws" mode and it already
5019            // exists. Please refer to DERBY-1 for more details on this issue.
5020            // A temporary workaround to avoid this problem is to make the logging
5021            // system use the file sync mechanism.
5022
5023            // disable the write sync and open the file in "rw" mode.
5024            isWriteSynced = false;
5025            log = privRandomAccessFile(logFile, "rw");
5026        }
5027        
5028        return log ;
5029    }
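
    /*
     * A generic sketch of the same "rws" first, then "rw" fallback idea
     * using java.io.RandomAccessFile directly; the helper is illustrative
     * and not part of the class. With "rws" every write is synced to the
     * device, with "rw" the caller must sync explicitly.
     */
    private static java.io.RandomAccessFile exampleOpenWriteSynced(File file)
        throws IOException
    {
        try
        {
            return new java.io.RandomAccessFile(file, "rws");
        }
        catch (FileNotFoundException fnfe)
        {
            // fall back to plain read/write, as the method above does
            return new java.io.RandomAccessFile(file, "rw");
        }
    }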
5030
5031
5032    private String getLogDirPath( StorageFile logDir)
5033    {
5034        if( logDevice == null)
5035            return logDir.toString();
5036        return logDevice + logStorageFactory.getSeparator() + logDir.toString();
5037    } // end of getLogDirPath
5038

5039    /*
5040        Following methods require Priv Blocks to run under a security manager.
5041    */

5042    private int action;
5043    private StorageFile activeFile;
5044    private File toFile;
5045    private String activePerms;
5046
5047    protected boolean privExists(StorageFile file)
5048    {
5049        return runBooleanAction(0, file);
5050    }
5051
5052    protected boolean privDelete(StorageFile file)
5053    {
5054        return runBooleanAction(1, file);
5055    }
5056
5057    private synchronized StorageRandomAccessFile privRandomAccessFile(StorageFile file, String perms)
5058        throws IOException
5059    {
5060        action = 2;
5061        activeFile = file;
5062        activePerms = perms;
5063        try
5064        {
5065            return (StorageRandomAccessFile) java.security.AccessController.doPrivileged(this);
5066        }
5067        catch (java.security.PrivilegedActionException pae)
5068        {
5069            throw (IOException) pae.getException();
5070        }
5071    }
5072
5073    protected boolean privCanWrite(StorageFile file)
5074    {
5075        return runBooleanAction(3, file);
5076    }
5077
5078    protected boolean privMkdirs(StorageFile file)
5079    {
5080        return runBooleanAction(4, file);
5081    }
5082
5083    private synchronized String[] privList(File file)
5084    {
5085        action = 8;
5086        toFile = file;
5087
5088        try
5089        {
5090            return (String[]) java.security.AccessController.doPrivileged(this);
5091        }
5092        catch (java.security.PrivilegedActionException pae)
5093        {
5094            return null;
5095        }
5096    }
5097    
5098    private synchronized String[] privList(StorageFile file)
5099    {
5100        action = 5;
5101        activeFile = file;
5102
5103        try
5104        {
5105            return (String[]) java.security.AccessController.doPrivileged(this);
5106        }
5107        catch (java.security.PrivilegedActionException pae)
5108        {
5109            return null;
5110        }
5111    }
5112
5113
5114    private synchronized boolean privCopyFile(StorageFile from, File to)
5115    {
5116        action = 6;
5117        activeFile = from;
5118        toFile = to;
5119        try
5120        {
5121            return ((Boolean) java.security.AccessController.doPrivileged(this)).booleanValue();
5122        }
5123        catch (java.security.PrivilegedActionException pae)
5124        {
5125            return false;
5126        }
5127    }
5128
5129    private synchronized boolean privCopyFile(File from, StorageFile to)
5130    {
5131        action = 9;
5132        activeFile = to;
5133        toFile = from;
5134        try
5135        {
5136            return ((Boolean) java.security.AccessController.doPrivileged(this)).booleanValue();
5137        }
5138        catch (java.security.PrivilegedActionException pae)
5139        {
5140            return false;
5141        }
5142    }
5143
5144    private boolean privRemoveDirectory(StorageFile file)
5145    {
5146        return runBooleanAction(7, file);
5147    }
5148
5149
5150    private synchronized boolean runBooleanAction(int action, StorageFile file) {
5151        this.action = action;
5152        this.activeFile = file;
5153
5154        try {
5155            return ((Boolean) java.security.AccessController.doPrivileged(this)).booleanValue();
5156        } catch (java.security.PrivilegedActionException pae) {
5157            return false;
5158        }
5159    }
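
    /*
     * A generic sketch of the privileged-block pattern the helpers above
     * rely on, written against java.security directly; the canWrite check
     * is illustrative and the helper is not part of the class.
     */
    private static boolean exampleCheckWritableUnderSecurityManager(final File file)
    {
        return ((Boolean) java.security.AccessController.doPrivileged(
            new java.security.PrivilegedAction()
            {
                public Object run()
                {
                    // runs with the permissions of this code base,
                    // not those of the callers further up the stack
                    return Boolean.valueOf(file.canWrite());
                }
            })).booleanValue();
    }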
5160
5161
5162    
5163
5164    public final Object run() throws IOException {
5165        switch (action) {
5166        case 0:
5167            // SECURITY PERMISSION - MP1
5168            return ReuseFactory.getBoolean(activeFile.exists());
5169        case 1:
5170            // SECURITY PERMISSION - OP5
5171            return ReuseFactory.getBoolean(activeFile.delete());
5172        case 2:
5173            // SECURITY PERMISSION - MP1 and/or OP4
5174            // depending on the value of activePerms
5175            return activeFile.getRandomAccessFile(activePerms);
5176        case 3:
5177            // SECURITY PERMISSION - OP4
5178            return ReuseFactory.getBoolean(activeFile.canWrite());
5179        case 4:
5180            // SECURITY PERMISSION - OP4
5181            return ReuseFactory.getBoolean(activeFile.mkdirs());
5182        case 5:
5183            // SECURITY PERMISSION - MP1
5184            return activeFile.list();
5185        case 6:
5186            // SECURITY PERMISSION - OP4 (Have to check these codes ??)
5187            return ReuseFactory.getBoolean(FileUtil.copyFile(logStorageFactory, activeFile, toFile));
5188        case 7:
5189            // SECURITY PERMISSION - OP4
5190            if( ! activeFile.exists())
5191                return ReuseFactory.getBoolean( true);
5192            return ReuseFactory.getBoolean(activeFile.deleteAll());
5193        case 8:
5194            return toFile.list();
5195        case 9:
5196            return ReuseFactory.getBoolean(FileUtil.copyFile( logStorageFactory, toFile, activeFile));
5197
5198        default:
5199            return null;
5200        }
5201    }
5202}
5203