

1 /*
2
3    Derby - Class org.apache.derby.impl.store.raw.data.RAFContainer
4
5    Licensed to the Apache Software Foundation (ASF) under one or more
6    contributor license agreements. See the NOTICE file distributed with
7    this work for additional information regarding copyright ownership.
8    The ASF licenses this file to you under the Apache License, Version 2.0
9    (the "License"); you may not use this file except in compliance with
10    the License. You may obtain a copy of the License at
11
12       http://www.apache.org/licenses/LICENSE-2.0
13
14    Unless required by applicable law or agreed to in writing, software
15    distributed under the License is distributed on an "AS IS" BASIS,
16    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17    See the License for the specific language governing permissions and
18    limitations under the License.
19
20  */

21
22 package org.apache.derby.impl.store.raw.data;
23
24 import org.apache.derby.iapi.reference.SQLState;
25 import org.apache.derby.impl.store.raw.data.BaseContainer;
26 import org.apache.derby.impl.store.raw.data.BaseContainerHandle;
27 import org.apache.derby.impl.store.raw.data.BasePage;
28
29 import org.apache.derby.iapi.services.cache.Cacheable;
30 import org.apache.derby.iapi.services.context.ContextService;
31 import org.apache.derby.iapi.services.monitor.Monitor;
32 import org.apache.derby.iapi.services.diag.Performance;
33 import org.apache.derby.iapi.services.sanity.SanityManager;
34 import org.apache.derby.iapi.services.io.FormatIdUtil;
35
36 import org.apache.derby.iapi.error.StandardException;
37
38 import org.apache.derby.iapi.store.raw.ContainerHandle;
39 import org.apache.derby.iapi.store.raw.ContainerKey;
40 import org.apache.derby.iapi.store.raw.Loggable;
41 import org.apache.derby.iapi.store.raw.log.LogInstant;
42 import org.apache.derby.iapi.store.raw.xact.RawTransaction;
43
44 import org.apache.derby.io.StorageFactory;
45 import org.apache.derby.io.WritableStorageFactory;
46 import org.apache.derby.io.StorageFile;
47 import org.apache.derby.io.StorageRandomAccessFile;
48 import org.apache.derby.iapi.services.io.FileUtil;
49 import java.util.Vector;
50
51 import java.io.DataInput;
52 import java.io.IOException;
53 import java.io.File;
54 import java.io.RandomAccessFile;
55 import java.security.AccessController;
56 import java.security.PrivilegedExceptionAction;
57 import java.security.PrivilegedActionException;
58 import java.lang.reflect.Method;
59 import java.lang.reflect.Constructor;
60
61 /**
62     RAFContainer (short for RandomAccessFileContainer) is a concrete subclass of FileContainer
63     for FileContainers which are implemented on java.io.RandomAccessFile.
64 */

65
66 class RAFContainer extends FileContainer implements PrivilegedExceptionAction
67 {
68
69     /*
70      * Immutable fields
71      */

72     protected StorageRandomAccessFile fileData;
73
74     /*
75     ** Mutable fields, only valid when the identity is valid.
76     */

77     protected boolean needsSync;
78
79     /* privileged actions */
80     private int actionCode;
81     private static final int GET_FILE_NAME_ACTION = 1;
82     private static final int CREATE_CONTAINER_ACTION = 2;
83     private static final int REMOVE_FILE_ACTION = 3;
84     private static final int OPEN_CONTAINER_ACTION = 4;
85     private static final int STUBBIFY_ACTION = 5;
86     private static final int BACKUP_CONTAINER_ACTION = 6;
87     private static final int GET_RANDOM_ACCESS_FILE_ACTION = 7;
88     private ContainerKey actionIdentity;
89     private boolean actionStub;
90     private boolean actionErrorOK;
91     private boolean actionTryAlternatePath;
92     private StorageFile actionFile;
93     private LogInstant actionInstant;
94     private String actionBackupLocation;
95     private BaseContainerHandle actionContainerHandle;
96
97     private boolean inBackup = false;
98     private boolean inRemove = false;
99
100     /* Fields with references to classes and methods in ReentrantLock
101      * introduced in Java 1.5. Reflection is used to only use these
102      * interfaces if they exist.
103      *
104      */

105     private static Class fairLockClass;
106     private static Constructor fairLockConstructor;
107     private static Method lock;
108     private static Method unlock;
109     private static boolean hasJava5FairLocks = false;
110
111     // Use reflection to find the constructor, lock() and unlock() in
112     // java.util.concurrent.locks.ReentrantLock. If the class and its
113     // methods are found, hasJava5FairLocks will be true and fair
114     // locking can be used.
115     static {
116         try {
117             fairLockClass =
118                 Class.forName("java.util.concurrent.locks.ReentrantLock");
119
120             fairLockConstructor =
121                 fairLockClass.getConstructor(new Class[] { Boolean.TYPE });
122
123             lock = fairLockClass.getMethod("lock", new Class[0]);
124             unlock = fairLockClass.getMethod("unlock", new Class[0]);
125             hasJava5FairLocks = true;
126         }
127         catch (NoSuchMethodException nsme) {}
128         catch (ClassNotFoundException cnfe) {}
129     }
130
131     /**
132      * Object of type java.util.concurrent.locks.ReentrantLock. It is
133      * used to prevent starvation when many threads are reading from
134      * the same file.
135      */

136     private Object fairLock;
137
138     /*
139      * Constructors
140      */

141
142     RAFContainer(BaseDataFileFactory factory) {
143         super(factory);
144
145         // If Java 1.5 fair locks are available, construct one.
146         if (hasJava5FairLocks) {
147             try {
148                 // construct a lock with fairness set to true
149                 fairLock =
150                     fairLockConstructor.newInstance(
151                         new Object[] { Boolean.TRUE });
152             } catch (Exception e) {
153                 // couldn't construct the lock, fall back to old behaviour
154
155                 hasJava5FairLocks = false;
156                 if (SanityManager.DEBUG) {
157                     SanityManager.THROWASSERT(
158                         "failed constructing ReentrantLock", e);
159                 }
160             }
161         }
162     }
163
164     /*
165     ** Methods overriding super-class
166     */

167
168     synchronized public boolean isDirty() {
169         return super.isDirty() || needsSync;
170     }
171
172     /*
173     ** Methods of Cacheable
174     */

175
176     /**
177         Set container's identity
178         @exception StandardException Standard Cloudscape error policy
179     */

180     public Cacheable setIdentity(Object key) throws StandardException {
181
182         ContainerKey newIdentity = (ContainerKey) key;
183
184         // if this is an open for a temp container then return an object of that type
185         if (newIdentity.getSegmentId() == ContainerHandle.TEMPORARY_SEGMENT) {
186
187             TempRAFContainer tmpContainer = new TempRAFContainer(dataFactory);
188             return tmpContainer.setIdent(newIdentity);
189         }
190
191         return setIdent(newIdentity);
192     }
193
194     /**
195         @exception StandardException Standard Cloudscape error policy
196      */

197     public Cacheable createIdentity(Object key, Object createParameter) throws StandardException {
198
199         ContainerKey newIdentity = (ContainerKey) key;
200
201         if (newIdentity.getSegmentId() == ContainerHandle.TEMPORARY_SEGMENT) {
202             TempRAFContainer tmpContainer = new TempRAFContainer(dataFactory);
203             return tmpContainer.createIdent(newIdentity, createParameter);
204         }
205
206         return createIdent(newIdentity, createParameter);
207     }
208
209
210     /*
211     ** Container creation, opening, and closing
212     */

213
214     /**
215         Remove the container
216
217         @exception StandardException Standard Cloudscape error policy
218     */

219     protected void removeContainer(LogInstant instant, boolean leaveStub)
220          throws StandardException
221     {
222
223         try {
224             synchronized(this)
225             {
226                 inRemove = true;
227                 // wait until the thread that is doing the backup stops
228                 // before proceeding with the remove.
229                 while(inBackup)
230                 {
231                     try {
232                         wait();
233                     }
234                     catch (InterruptedException ie)
235                     {
236                         throw StandardException.interrupt(ie);
237                     }
238                 }
239             }
240
241         // discard all of my pages in the cache
242         pageCache.discard(identity);
243         stubbify(instant);
244         }finally
245         {
246             synchronized(this) {
247                 inRemove = false;
248                 notifyAll();
249             }
250         }
251
252         // RESOLVE: leaveStub false
253     }
254
255     final void closeContainer() {
256
257         if (fileData != null) {
258             try {
259                 fileData.close();
260             } catch (IOException ioe) {
261             } finally {
262
263                 fileData = null;
264             }
265         }
266     }
267
268
269     /*
270     ** Methods used solely by StoredPage
271     */

272
273     /**
274         Read a page into the supplied array.
275
276         <BR> MT - thread safe
277         @exception IOException exception reading page
278         @exception StandardException Standard Cloudscape error policy
279     */

280     protected void readPage(long pageNumber, byte[] pageData)
281          throws IOException, StandardException
282     {
283         if (SanityManager.DEBUG) {
284             SanityManager.ASSERT(!getCommittedDropState());
285         }
286
287         long pageOffset = pageNumber * pageSize;
288
289         // Use Java 1.5 fair locks if they are available.
290         if (hasJava5FairLocks) {
291             try {
292                 lock.invoke(fairLock, null);
293             } catch (Exception e) {
294                 // Something bad happened while trying to lock the
295                 // region. Since the locking is not required for
296                 // anything other than ensuring fairness, it is ok to
297                 // fall back to pre-1.5 behaviour.
298                 hasJava5FairLocks = false;
299                 if (SanityManager.DEBUG) {
300                     SanityManager.THROWASSERT(
301                         "failed invoking ReentrantLock.lock()", e);
302                 }
303             }
304         }
305
306         try {
307             // Starvation might occur at this point if many threads
308             // are waiting for the monitor. This section is therefore
309             // surrounded by calls to ReentrantLock.lock()/unlock() if
310             // we are running Java 1.5 or higher.
311             synchronized (this) {
312                 fileData.seek(pageOffset);
313                 fileData.readFully(pageData, 0, pageSize);
314             }
315         } finally {
316             // Unlock this section.
317             if (hasJava5FairLocks) {
318                 try {
319                     unlock.invoke(fairLock, null);
320                 } catch (Exception e) {
321                     // An error occurred while unlocking the
322                     // region. The region might still be locked, so
323                     // we'd better stop using this kind of
324                     // locking. There will be no loss of
325                     // functionality, only a possible loss of
326                     // fairness.
327                     hasJava5FairLocks = false;
328                     if (SanityManager.DEBUG) {
329                         SanityManager.THROWASSERT(
330                             "failed invoking ReentrantLock.unlock()", e);
331                     }
332                 }
333             }
334         }
335
336         if (dataFactory.databaseEncrypted() &&
337             pageNumber != FIRST_ALLOC_PAGE_NUMBER)
338         {
339             decryptPage(pageData, pageSize);
340         }
341     }
342
343     /**
344         Write a page from the supplied array.
345
346         <BR> MT - thread safe
347
348         @exception StandardException Standard Cloudscape error policy
349         @exception IOException IO error accessing page
350     */

351     protected void writePage(long pageNumber, byte[] pageData, boolean syncPage)
352          throws IOException, StandardException
353     {
354         synchronized(this)
355         {
356
357             if (getCommittedDropState())
358             {
359                 // committed and dropped, do nothing.
360                 // This file container may only be a stub
361
362                 return;
363             }
364
365             ///////////////////////////////////////////////////
366             //
367             // RESOLVE: right now, no logical -> physical mapping.
368             // We can calculate the offset. In the future, we may need to
369             // look at the allocation page or the in memory translation table
370             // to figure out where the page should go
371             //
372             /////////////////////////////////////////////////
373
374             long pageOffset = pageNumber * pageSize;
375
376             byte [] encryptionBuf = null;
377             if (dataFactory.databaseEncrypted()
378                 && pageNumber != FIRST_ALLOC_PAGE_NUMBER)
379             {
380                 // We cannot encrypt the page in place because pageData is
381                 // still being accessed as clear text. The encryption
382                 // buffer is shared by all who access this container and can
383                 // only be used within the synchronized block.
384
385                 encryptionBuf = getEncryptionBuffer();
386             }
387
388             byte[] dataToWrite =
389                 updatePageArray(pageNumber, pageData, encryptionBuf, false);
390
391             try
392             {
393                 fileData.seek(pageOffset);
394
395                 /**
396                     On EPOC (www.symbian.com) a seek beyond the end of
397                     a file just moves the file pointer to the end of the file.
398
399                 */

400                 if (fileData.getFilePointer() != pageOffset)
401                     padFile(fileData, pageOffset);
402
403                 dataFactory.writeInProgress();
404                 try
405                 {
406                     fileData.write(dataToWrite, 0, pageSize);
407                 }
408                 finally
409                 {
410                     dataFactory.writeFinished();
411                 }
412             }
413             catch (IOException ioe)
414             {
415                 // On some platforms, if we seek beyond the end of file, or try
416                 // to write beyond the end of file (not appending to it, but
417                 // skipping some bytes), it will give IOException.
418                 // Try writing zeros from the current end of file to pageOffset
419                 // and see if we can then do the seek/write. The difference
420                 // between pageOffset and current end of file is almost always
421                 // going to be the multiple of pageSize
422
423                 if (!padFile(fileData, pageOffset))
424                     throw ioe; // not writing beyond EOF, rethrow exception
425

426                 if (SanityManager.DEBUG)
427                 {
428                     SanityManager.ASSERT(
429                         fileData.length() >= pageOffset,
430                         "failed to blank filled missing pages");
431                 }
432
433                 fileData.seek(pageOffset);
434                 dataFactory.writeInProgress();
435                 try
436                 {
437                     fileData.write(dataToWrite, 0, pageSize);
438                 }
439                 finally
440                 {
441                     dataFactory.writeFinished();
442                 }
443             }
444
445             if (syncPage)
446             {
447                 dataFactory.writeInProgress();
448                 try
449                 {
450                     if (!dataFactory.dataNotSyncedAtAllocation)
451                         fileData.sync( false);
452                 }
453                 finally
454                 {
455                     dataFactory.writeFinished();
456                 }
457             }
458             else
459             {
460                 needsSync = true;
461             }
462         }
463
464     }
465
466     /**
467      * Update the page array with container header if the page is a first alloc
468      * page and encrypt the page data if the database is encrypted.
469      * @param pageNumber the page number of the page
470      * @param pageData byte array that has the actual page data.
471      * @param encryptionBuf buffer that is used to store the encrypted version of the
472      * page.
473      * @return byte array of the page data as it should be on the disk.
474      */

475     private byte[] updatePageArray(long pageNumber,
476                                    byte[] pageData,
477                                    byte[] encryptionBuf,
478                                    boolean encryptWithNewEngine)
479         throws StandardException, IOException
480     {
481         if (pageNumber == FIRST_ALLOC_PAGE_NUMBER)
482         {
483             // write header into the alloc page array regardless of dirty
484             // bit because the alloc page has zeroed out the borrowed
485             // space
486             writeHeader(pageData);
487
488             if (SanityManager.DEBUG)
489             {
490                 if (FormatIdUtil.readFormatIdInteger(pageData) != AllocPage.FORMAT_NUMBER)
491                     SanityManager.THROWASSERT(
492                             "expect " +
493                             AllocPage.FORMAT_NUMBER +
494                             "got " +
495                             FormatIdUtil.readFormatIdInteger(pageData));
496             }
497
498             return pageData;
499
500         }
501         else
502         {
503             if (dataFactory.databaseEncrypted() || encryptWithNewEngine)
504             {
505                 return encryptPage(pageData,
506                                    pageSize,
507                                    encryptionBuf,
508                                    encryptWithNewEngine);
509             }
510             else
511             {
512                 return pageData;
513             }
514         }
515     }
516
517
518     /**
519         Pad the file up to the passed in page offset.
520         Returns true if the file needed padding.
521     */

522
523     private boolean padFile(StorageRandomAccessFile file, long pageOffset)
524         throws IOException, StandardException {
525
526         long currentEOF = file.length();
527         if (currentEOF >= pageOffset)
528             return false;
529
530         // all objects in java are by definition initialized
531         byte zero[] = new byte[pageSize];
532
533         file.seek(currentEOF);
534
535         while(currentEOF < pageOffset)
536         {
537             dataFactory.writeInProgress();
538             try
539             {
540                 long len = pageOffset - currentEOF;
541                 if (len > pageSize)
542                     len = pageSize;
543
544                 file.write(zero, 0, (int) len);
545             }
546             finally
547             {
548                 dataFactory.writeFinished();
549             }
550             currentEOF += pageSize;
551         }
552
553         return true;
554     }
555
556     /**
557      * Clean the container.
558      * <p>
559      * Write out the container header and sync all dirty pages of this
560      * container to disk before returning.
561      * <p>
562      * checkpoint calls this interface through callbacks by telling
563      * the cache manager to clean all containers in the open container
564      * cache. This sync of the file happens as part of writing and then
565      * syncing the container header in writeRAFHeader().
566      * <p>
567      *
568      * @param forRemove Is clean called because container is being removed?
569      *
570      * @exception StandardException Standard exception policy.
571      **/

572     public void clean(boolean forRemove) throws StandardException
573     {
574         boolean waited = false;
575
576         synchronized (this) {
577
578             // committed and dropped, do nothing.
579             // This file container has already been stubbified
580             if (getCommittedDropState()) {
581                 clearDirty();
582                 return;
583             }
584
585             // The container is about to change, need to wait till it is really
586             // changed. We are in the predirty state only for the duration
587             // where the log record that changed the container has been sent to
588             // the log and before the change actually happened.
589             while(preDirty == true)
590             {
591                 waited = true;
592                 try
593                 {
594                     wait();
595                 }
596                 catch (InterruptedException ie)
597                 {
598                     throw StandardException.interrupt(ie);
599                 }
600             }
601
602             if (waited)
603             {
604                 // someone else may have stubbified this while we waited
605                 if (getCommittedDropState())
606                 {
607                     clearDirty();
608                     return;
609                 }
610             }
611
612
613             if (forRemove) {
614
615                 // removeFile()
616                 // clearDirty();
617
618             } else if (isDirty()) {
619  
620                 try {
621
622                     // Cannot get the alloc page and write it out
623                     // because in order to do so, the alloc page will need to
624                     // find this container object. But this container object
625                     // is in the middle of being cleaned and may not be
626                     // 'found' and we will hang.
627                     //
628                     // Instead, just clobber the container info, which is
629                     // checksum'ed separately from the alloc page
630                     //
631                     writeRAFHeader(fileData,
632                                    false, // don't create, container exists
633                                    true); // syncfile
634

635                     clearDirty();
636
637                 } catch (IOException ioe) {
638
639                     throw dataFactory.markCorrupt(
640                         StandardException.newException(
641                             SQLState.FILE_CONTAINER_EXCEPTION, ioe, this));
642                 }
643             }
644         }
645     }
646
647     private void clearDirty() {
648         isDirty = false;
649         needsSync = false;
650     }
651
652
653     /**
654         Preallocate some pages if need be
655     */

656     protected int preAllocate(long lastPreallocPagenum,
657                               int preAllocSize)
658     {
659     
660         /* We used to have a condition here that looked at the file size before
661          * preallocation, to handle optimization cases like: we preallocated
662          * the space and then crashed. As we don't log the preallocated
663          * length, we don't have an updated value until the AllocExtent
664          * page gets flushed to disk; the only way to find out that the pages
665          * we want already exist is to look at the file length.
666          * Although it was a nice thing to do, we had bug no: 3813 from a
667          * customer who, for some unexplainable reason, got lots of
668          * junk at the end of the file. As junk is not initialized with a
669          * format-ID, we get into recovery problems.
670          * To avoid such unforeseen conditions, the file size check was
671          * removed, as it is better not to fail in recovery than to lose
672          * some special case performance improvement.
673          */

674   
675         int n = doPreAllocatePages(lastPreallocPagenum, preAllocSize);
676
677         if (n > 0) // sync the file
678         {
679             synchronized(this)
680             {
681                 boolean inwrite = false;
682                 try
683                 {
684                     dataFactory.writeInProgress();
685                     inwrite = true;
686
687                     if (!dataFactory.dataNotSyncedAtAllocation)
688                         fileData.sync(false);
689                 }
690                 catch (IOException ioe)
691                 {
692                     // The disk may have run out of space.
693                     // Don't error out in pre-allocation since the user may not
694                     // actually need this page.
695                     n = 0;
696                 }
697                 catch (StandardException se)
698                 {
699                     // some problem calling writeInProgress
700                     n = 0;
701                 }
702                 finally
703                 {
704                     if (inwrite)
705                         dataFactory.writeFinished();
706                 }
707             }
708         }
709
710         return n;
711     }
712
713     /**
714      * Truncate pages of a container.
715      * <p>
716      * Truncate all pages from lastValidPagenum+1 through the end of the file.
717      * <p>
718      *
719      * @param lastValidPagenum The page number of the last valid page of the
720      * file. All pages after this one are truncated.
721      *
722      * @exception StandardException Standard exception policy.
723      **/

724     protected void truncatePages(
725     long lastValidPagenum)
726         throws StandardException
727     {
728
729
730         synchronized(this)
731         {
732             boolean inwrite = false;
733             try
734             {
735                 dataFactory.writeInProgress();
736                 inwrite = true;
737
738                 fileData.setLength((lastValidPagenum + 1) * pageSize);
739             }
740             catch (IOException ioe)
741             {
742                 // The disk may have run out of space.
743                 // Don't error out in un-allocation since application can
744                 // still function even if allocation fails.
745             }
746             catch (StandardException se)
747             {
748                 // some problem calling writeInProgress
749             }
750             finally
751             {
752                 if (inwrite)
753                     dataFactory.writeFinished();
754             }
755         }
756
757         return;
758     }
759
760
761     /*
762         Write the header of a random access file and sync it
763         @param create if true, the container is being created
764                 if false, the container already exists
765         @param syncFile if true, sync the file
766     */

767     private void writeRAFHeader(StorageRandomAccessFile file, boolean create,
768                                 boolean syncFile)
769          throws IOException, StandardException
770     {
771         byte[] epage;
772         if (create)
773         {
774             // the file doesn't exist yet, get an embryonic page buffer
775             epage = getEmbryonicPage((DataInput)null);
776         }
777         else
778         {
779             file.seek(FIRST_ALLOC_PAGE_OFFSET);
780             epage = getEmbryonicPage(file);
781         }
782
783         // need to check for frozen state
784

785
786         file.seek(FIRST_ALLOC_PAGE_OFFSET);
787         writeHeader(file, create, epage);
788
789         // leave the end of the file at a page boundary. This
790         // is to work around bugs in the EPOC jvm where a seek
791         // beyond the end of a file does not throw an exception
792         // but just moves the offset to the end of the file. This only
793         // occurs when the second page is written after the header has
794         // been written, ending up with the page at the incorrect offset.
795         if (create) {
796             padFile(file, pageSize);
797         }
798
799         if (syncFile)
800         {
801             dataFactory.writeInProgress();
802             try
803             {
804                 if (!dataFactory.dataNotSyncedAtCheckpoint)
805                    file.sync(false);
806
807             }
808             finally
809             {
810                 dataFactory.writeFinished();
811             }
812         }
813
814         epage = null;
815     }
816
817     /**
818         flush the cache to ensure all of my pages are written to disk
819
820         @exception StandardException Standard Cloudscape error policy
821     */

822     protected void flushAll() throws StandardException {
823
824         pageCache.clean(identity);
825
826         // now clean myself which will sync all my pages.
827         clean(false);
828     }
829
830
831      synchronized StorageFile getFileName(ContainerKey identity, boolean stub,
832                                              boolean errorOK, boolean tryAlternatePath)
833          throws StandardException
834      {
835          // RESOLVE - READ ONLY
836

837          actionCode = GET_FILE_NAME_ACTION;
838          actionIdentity = identity;
839          actionStub = stub;
840          actionErrorOK = errorOK;
841          actionTryAlternatePath = tryAlternatePath;
842          try
843          {
844              return (StorageFile) AccessController.doPrivileged( this);
845          }
846          catch( PrivilegedActionException pae){ throw (StandardException) pae.getException();}
847          finally{ actionIdentity = null; }
848      }
849
850     protected StorageFile privGetFileName(ContainerKey identity, boolean stub,
851                                     boolean errorOK, boolean tryAlternatePath)
852         throws StandardException
853     {
854         StorageFile container = dataFactory.getContainerPath( identity, stub);
855
856         // retry with small case 'c' and 'd'
857         // bug fix for track 3444
858         if (!container.exists() && tryAlternatePath)
859         {
860             container = dataFactory.getAlternateContainerPath( identity, stub);
861         }
862
863         if (!container.exists()) {
864
865             StorageFile directory = container.getParentDir();
866
867             if (!directory.exists())
868             {
869                 // make sure only 1 thread can create a segment at one time
870                 synchronized(dataFactory)
871                 {
872                     if (!directory.exists())
873                     {
874                         if (!directory.mkdirs())
875                         {
876                             if (errorOK)
877                             {
878                                 return null;
879                             }
880                             else
881                             {
882                                 throw StandardException.newException(
883                                     SQLState.FILE_CANNOT_CREATE_SEGMENT,
884                                     directory);
885                             }
886                         }
887                     }
888                 }
889             }
890         }
891
892         return container;
893     } // end of privGetFileName
894

895
896     synchronized void createContainer(ContainerKey newIdentity)
897         throws StandardException
898     {
899
900         if (SanityManager.DEBUG) {
901             if ((spareSpace < 0) || (spareSpace > 100))
902                 SanityManager.THROWASSERT("invalid spare space " + spareSpace);
903         }
904
905         actionCode = CREATE_CONTAINER_ACTION;
906         actionIdentity = newIdentity;
907         try
908         {
909             AccessController.doPrivileged( this);
910         }
911         catch( PrivilegedActionException pae){ throw (StandardException) pae.getException();}
912         finally{ actionIdentity = null; }
913     } // end of createContainer
914

915     synchronized boolean removeFile(StorageFile file)
916         throws SecurityException, StandardException
917     {
918         actionCode = REMOVE_FILE_ACTION;
919         actionFile = file;
920         try
921         {
922             return AccessController.doPrivileged( this) != null;
923         }
924         catch( PrivilegedActionException pae){ throw (StandardException) pae.getException();}
925         finally{ actionFile = null; }
926     } // end of removeFile
927

928     private boolean privRemoveFile(StorageFile file)
929         throws StandardException
930     {
931         closeContainer();
932
933         dataFactory.writeInProgress();
934         try
935         {
936             if (file.exists())
937                 return file.delete();
938         }
939         finally
940         {
941             dataFactory.writeFinished();
942         }
943
944         return true;
945     } // end of privRemoveFile
946

947     synchronized boolean openContainer(ContainerKey newIdentity)
948         throws StandardException
949     {
950         actionCode = OPEN_CONTAINER_ACTION;
951         actionIdentity = newIdentity;
952         try
953         {
954             return AccessController.doPrivileged( this) != null;
955         }
956         catch( PrivilegedActionException pae){ throw (StandardException) pae.getException();}
957         finally{ actionIdentity = null; }
958     }
959
960     private synchronized void stubbify(LogInstant instant)
961         throws StandardException
962     {
963          // update header, synchronized this in case the cache is cleaning
964          // this container at the same time. Make sure the clean and
965          // stubbify is mutually exclusive.
966          setDroppedState(true);
967          setCommittedDropState(true);
968
969          // The whole container should be shrunk into a 'stub'.
970          // If the file system supports truncation, we can just truncate the
971          // file after the header. Since it doesn't, we need to write out a
972          // separate file (the stub), then reset fileData to point to that,
973          // then remove the current file.
974          //
975          // There may still be dirty pages that belong to this file which are
976          // still in the page cache. They need not really
977          // be written since they don't really exist anymore
978          //
979          // there are 3 pieces of information on disk :
980          // 1) the log operation that caused this file to be stubbified
981          // 2) the stub
982          // 3) the file
983          //
984          // The order of events, as far as persistent store is concerned, is
985          // A) the stub shows up
986          // B) the file disappears
987          // C) the log operation got flushed
988          // (B and C may swap order)
989          //
990          // If neither A nor B happens (we crashed before the sync call),
991          // then nothing happened.
992          //
993          // if A happened but B and C did not, then when we recover, we will not
994          // know the file has been stubbified. Hopefully, it will be stubbified
995          // again if the post-commit queue manager is alerted to the fact.
996          //
997          // if A and B happened but C did not, then the file is stubbified but
998          // there is no log record to indicate that. This is undesirable but
999          // still safe because the only time we stubbify is on a post commit
1000         // operation, i.e., either a create container has rolled back or a
1001         // dropped container has committed. We end up having a container
1002         // stub which behaves the same as a dropped container - only that all
1003         // the redo work is unnecessary because we 'know' it will
1004         // eventually be dropped and committed.
1005         //
1006         // If A and C happened and not B, then during redo, this stubbify
1007         // routine will be called again and the file will be deleted again
1008         //
1009         // The reason why A has to be sync'ed out is that we don't want B to
1010         // happen but A did not and the system crashed. Then we are left
1011         // with neither the file nor the stub and maybe even no log record.
1012         // Then the system is not recoverable.
1013
1014        actionIdentity = (ContainerKey)getIdentity();
1015        actionInstant = instant;
1016        actionCode = STUBBIFY_ACTION;
1017        try
1018        {
1019            AccessController.doPrivileged( this);
1020        }
1021        catch( PrivilegedActionException pae){ throw (StandardException) pae.getException();}
1022        finally
1023        {
1024            actionIdentity = null;
1025            actionInstant = null;
1026        }
1027    }
1028
1029
1030
1031
1032        
1033    /**
1034     * Backup the container.
1035     *
1036     * @param handle the container handle.
1037     * @param backupLocation location of the backup container.
1038     * @exception StandardException Standard Derby error policy
1039     */

1040    protected void backupContainer(BaseContainerHandle handle, String backupLocation)
1041        throws StandardException
1042    {
1043        actionContainerHandle = handle;
1044        actionBackupLocation = backupLocation;
1045        actionCode = BACKUP_CONTAINER_ACTION;
1046        try
1047        {
1048            AccessController.doPrivileged(this);
1049        }
1050        catch( PrivilegedActionException pae){ throw (StandardException) pae.getException();}
1051        finally
1052        {
1053            actionContainerHandle = null;
1054            actionBackupLocation = null;
1055        }
1056    }
1057
1058
1059    /**
1060     * Backup the container.
1061     *
1062     * The container is written to the backup by reading the pages
1063     * through the page cache, and then writing into the backup container.
1064     * If the container is dropped (committed drop), only the container stub is
1065     * copied to the backup using a simple file copy.
1066     *
1067     * MT -
1068     * At any given time only one backup thread is allowed, but while backup is in
1069     * progress DML/DDL operations can run in parallel. Pages are latched while
1070     * writing them to the backup to avoid copying partial changes to the pages.
1071     * Online backup does not acquire any user level locks, so users can drop
1072     * tables when backup is in progress. So it is possible that a Container
1073     * Removal request can come in when a container backup is in progress.
1074     * This case is handled by using synchronization on this object monitor
1075     * and the inRemove and inBackup flags. Container removal checks if backup
1076     * is in progress and waits for the backup to yield to continue the removal.
1077     * The basic idea is to give preference to remove by stopping the backup of the
1078     * container temporarily when the remove container is requested by another
1079     * thread. Generally, it takes more time to backup a regular container than
1080     * the stub because the stub is just one page. After each page copy, a check is
1081     * made to find if a remove is requested, and if it is, then backup of the
1082     * container is aborted and the backup thread puts itself into the wait state until
1083     * the remove request thread notifies that the remove is complete. When the
1084     * remove request completes, the stub is copied into the backup.
1085     *
1086     * Compress is blocked when backup is in progress, so truncation of the
1087     * container cannot happen when backup is in progress. No need to
1088     * synchronize backup of the container with truncation.
1089     *
1090     *
1091     * @param handle the container handle.
1092     * @param backupLocation location of the backup container.
1093     * @exception StandardException Derby Standard error policy
1094     *
1095     */

1096    private void privBackupContainer(BaseContainerHandle handle,
1097                                     String backupLocation)
1098        throws StandardException
1099    {
1100        boolean backupCompleted = false;
1101        File backupFile = null;
1102        RandomAccessFile backupRaf = null;
1103        boolean isStub = false;
1104        BasePage page = null;
1105
1106        while(!backupCompleted) {
1107            try {
1108
1109                synchronized (this) {
1110                    // wait if some one is removing the
1111                    // container because of a drop.
1112                    while (inRemove)
1113                    {
1114                        try {
1115                            wait();
1116                        }
1117                        catch (InterruptedException ie)
1118                        {
1119                            throw StandardException.interrupt(ie);
1120                        }
1121                    }
1122
1123                    if (getCommittedDropState())
1124                        isStub = true;
1125                    inBackup = true;
1126                }
1127            
1128                // create container at the backup location.
1129                if (isStub) {
1130                    // get the stub ( it is a committed drop table container )
1131                    StorageFile file = privGetFileName((ContainerKey)getIdentity(),
1132                                                       true, false, true);
1133                    backupFile = new File(backupLocation, file.getName());
1134
1135                    // directly copy the stub to the backup
1136                    if(!FileUtil.copyFile(dataFactory.getStorageFactory(),
1137                                          file, backupFile))
1138                    {
1139                        throw StandardException.newException(
1140                                              SQLState.RAWSTORE_ERROR_COPYING_FILE,
1141                                              file, backupFile);
1142                    }
1143                }else {
1144                    // regular container file
1145                    long lastPageNumber= getLastPageNumber(handle);
1146                    if (lastPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) {
1147                        // last page number is invalid if there are no pages in
1148                        // the container yet. No need to backup this container,
1149                        // this container creation is yet to complete. The reason
1150                        // backup is getting called on such a container is
1151                        // because the container handle appears in the cache after
1152                        // the file is created on the disk but before its
1153                        // first page is allocated.
1154                        return;
1155                    }
1156
1157                    StorageFile file =
1158                        privGetFileName(
1159                            (ContainerKey)getIdentity(), false, false, true);
1160
1161                    backupFile = new File(backupLocation , file.getName());
1162                    backupRaf = new RandomAccessFile(backupFile, "rw");
1163
1164                    byte[] encryptionBuf = null;
1165                    if (dataFactory.databaseEncrypted()) {
1166                        // Backup uses a separate encryption buffer to encrypt the
1167                        // page instead of the encryption buffer used by the regular
1168                        // container writes. Otherwise writes to the backup
1169                        // have to be synchronized with regular database writes
1170                        // because backup can run in parallel to container
1171                        // writes.
1172                        encryptionBuf = new byte[pageSize];
1173                    }
1174
1175                    // copy all the pages of the container from the database
1176                    // to the backup location by reading through the page cache.
1177                    for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER;
1178                         pageNumber <= lastPageNumber; pageNumber++) {
1179                        page = getLatchedPage(handle, pageNumber);
1180                        
1181                        // update the page array before writing to the disk
1182                        // with container header and encrypt it if the database
1183                        // is encrypted.
1184
1185                        byte[] dataToWrite = updatePageArray(pageNumber,
1186                                                             page.getPageArray(),
1187                                                             encryptionBuf, false);
1188                        backupRaf.write(dataToWrite, 0, pageSize);
1189
1190                        // unlatch releases page from cache, see
1191                        // StoredPage.releaseExclusive()
1192                        page.unlatch();
1193                        page = null;
1194
1195                        // check if some one wants to commit drop the table while
1196                        // the container is being written to the backup. If so,
1197                        // abort the backup and restart it once the drop
1198                        // is complete.
1199
1200                        synchronized (this)
1201                        {
1202                            if (inRemove) {
1203                                break;
1204                            }
1205                        }
1206                    }
1207                }
1208
1209                // sync and close the backup container. In case of a stub,
1210                // it is already synced and closed while doing the copy.
1211                if(!isStub) {
1212                    backupRaf.getFD().sync();
1213                    backupRaf.close();
1214                    backupRaf = null;
1215                }
1216                
1217                // backup of the container is complete.
1218                backupCompleted = true;
1219
1220            }catch (IOException ioe) {
1221                throw StandardException.newException(
1222                                                SQLState.BACKUP_FILE_IO_ERROR,
1223                                                ioe,
1224                                                backupFile);
1225            } finally {
1226                synchronized (this) {
1227                    inBackup = false;
1228                    notifyAll();
1229                }
1230
1231                if (page != null) {
1232                    page.unlatch();
1233                    page = null;
1234                }
1235
1236                // if backup of container is not complete, close the file
1237                // handles and remove the container file from the backup
1238                // if it exists
1239                if (!backupCompleted && backupFile != null)
1240                {
1241                    if (backupRaf != null)
1242                    {
1243                        try {
1244                            backupRaf.close();
1245                            backupRaf = null;
1246                        } catch (IOException ioe){
1247                            throw StandardException.newException(
1248                                            SQLState.BACKUP_FILE_IO_ERROR,
1249                                            ioe,
1250                                            backupFile);
1251                        }
1252                    }
1253
1254                    if(backupFile.exists())
1255                    {
1256                        if (!backupFile.delete())
1257                            throw StandardException.newException(
1258                                                SQLState.UNABLE_TO_DELETE_FILE,
1259                                                backupFile);
1260                    }
1261                }
1262            }
1263        }
1264    }
1265
1266
1267
1268
1269    /**
1270     * Create encrypted version of the container with the
1271     * user specified encryption properties.
1272     *
1273     * Read all the pages of the container from the original container
1274     * through the page cache, encrypt each page data with new encryption
1275     * mechanism and write to the specified container file.
1276     *
1277     * @param handle the container handle.
1278     * @param newFilePath file to store the new encrypted version of
1279     * the container
1280     * @exception StandardException Derby Standard error policy
1281     *
1282     */

1283    protected void encryptContainer(BaseContainerHandle handle,
1284                                    String newFilePath)
1285        throws StandardException
1286    {
1287        BasePage page = null;
1288        StorageFile newFile =
1289            dataFactory.getStorageFactory().newStorageFile(newFilePath);
1290        StorageRandomAccessFile newRaf = null;
1291        try {
1292            long lastPageNumber= getLastPageNumber(handle);
1293 
1294            newRaf = privGetRandomAccessFile(newFile);
1295
1296            byte[] encryptionBuf = null;
1297            encryptionBuf = new byte[pageSize];
1298
1299            // copy all the pages from the current container to the
1300            // new container file after encrypting the pages.
1301            for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER;
1302                 pageNumber <= lastPageNumber; pageNumber++)
1303            {
1304
1305                page = getLatchedPage(handle, pageNumber);
1306                        
1307                // update the page array before writing to the disk
1308                // with container header and encrypt it.
1309
1310                byte[] dataToWrite = updatePageArray(pageNumber,
1311                                                     page.getPageArray(),
1312                                                     encryptionBuf,
1313                                                     true);
1314                newRaf.write(dataToWrite, 0, pageSize);
1315
1316                // unlatch releases page from cache.
1317                page.unlatch();
1318                page = null;
1319            }
1320
1321            // sync the new version of the container.
1322            newRaf.sync(true);
1323            newRaf.close();
1324            newRaf = null;
1325            
1326        }catch (IOException ioe) {
1327            throw StandardException.newException(
1328                                    SQLState.FILE_CONTAINER_EXCEPTION,
1329                                    ioe,
1330                                    newFile);
1331        } finally {
1332
1333            if (page != null) {
1334                page.unlatch();
1335                page = null;
1336            }
1337            
1338            if (newRaf != null) {
1339                try {
1340                    newRaf.close();
1341                }catch (IOException ioe)
1342                {
1343                    newRaf = null;
1344                    throw StandardException.newException(
1345                                    SQLState.FILE_CONTAINER_EXCEPTION,
1346                                    ioe,
1347                                    newFile);
1348                    
1349                }
1350            }
1351        }
1352    }
1353
1354
1355    synchronized StorageRandomAccessFile privGetRandomAccessFile(StorageFile file)
1356        throws SecurityException, StandardException
1357    {
1358        actionCode = GET_RANDOM_ACCESS_FILE_ACTION;
1359        actionFile = file;
1360        try
1361        {
1362            return (StorageRandomAccessFile)AccessController.doPrivileged(this);
1363        }
1364        catch( PrivilegedActionException pae){
1365            throw (StandardException) pae.getException();
1366        }
1367        finally{ actionFile = null; }
1368    }
1369
1370
1371
1372     // PrivilegedExceptionAction method
1373     public Object run() throws StandardException, IOException
1374     {
1375         switch( actionCode)
1376         {
1377         case GET_FILE_NAME_ACTION:
1378             return privGetFileName( actionIdentity, actionStub, actionErrorOK, actionTryAlternatePath);
1379
1380         case CREATE_CONTAINER_ACTION:
1381         {
1382             StorageFile file = privGetFileName( actionIdentity, false, false, false);
1383
1384             try {
1385                 if (file.exists()) {
1386                     // note I'm left in the no-identity state as fillInIdentity()
1387                     // hasn't been called.
1388                     throw StandardException.newException( SQLState.FILE_EXISTS, file);
1389                 }
1390             } catch (SecurityException se) {
1391                 throw StandardException.newException( SQLState.FILE_CREATE, se, file);
1392             }
1393
1394             try {
1395
1396                 // OK not to force WAL here, in fact, this operation precedes the
1397                 // creation of the log record to ensure sufficient space.
1398
1399                 dataFactory.writeInProgress();
1400                 try
1401                 {
1402                     fileData = file.getRandomAccessFile( "rw");
1403                 }
1404                 finally
1405                 {
1406                     dataFactory.writeFinished();
1407                 }
1408
1409                 // This container format specifies that the first page is an
1410                 // allocation page and the container information is stored within
1411                 // it. The allocation page needs to be somewhat formatted
1412                 // because if the system crashed after the create container log
1413                 // operation is written, it needs to be well formed enough to get
1414                 // the container information back out of it.
1415                 //
1416                 // Don't try to go thru the page cache here because the container
1417                 // object cannot be found in the container cache at this point
1418                 // yet. However, if we use the page cache to store the first
1419                 // allocation page, then in order to write itself out, it needs to
1420                 // ask the container to do so, which is going to create a
1421                 // deadlock. The allocation page cannot write itself out without
1422                 // going thru the container because it doesn't know where its
1423                 // offset is. Here we effectively hardwired page 0 at offset 0 of
1424                 // the container file to be the first allocation page.
1425
1426                 // create an embryonic page - if this is not a temporary container,
1427                 // synchronously write out the file header.
1428                 writeRAFHeader(fileData, true,
1429                                (actionIdentity.getSegmentId() != ContainerHandle.TEMPORARY_SEGMENT));
1430
1431             } catch (SecurityException se) {
1432
1433                 // only thrown by the RandomAccessFile constructor,
1434                 // so the file won't exist
1435                 throw StandardException.newException( SQLState.FILE_CREATE, se, file);
1436
1437             } catch (IOException ioe) {
1438
1439                 boolean fileDeleted;
1440                 try {
1441                     fileDeleted = privRemoveFile(file);
1442                 } catch (SecurityException se) {
1443                     throw StandardException.newException( SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, se.toString());
1444                 }
1445
1446                 if (!fileDeleted) {
1447                     throw StandardException.newException( SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, ioe.toString());
1448                 }
1449
1450                 throw StandardException.newException( SQLState.FILE_CREATE, ioe, file);
1451             }
1452
1453             canUpdate = true;
1454             return null;
1455         } // end of case CREATE_CONTAINER_ACTION
1456

1457         case REMOVE_FILE_ACTION:
1458             return privRemoveFile( actionFile) ? this : null;
1459
1460         case OPEN_CONTAINER_ACTION:
1461         {
1462             boolean isStub = false; // is this a stub?
1463

1464             StorageFile file = privGetFileName( actionIdentity, false, true, true);
1465             if (file == null)
1466                 return null;
1467
1468             try {
1469                 if (!file.exists()) {
1470
1471                     // file does not exist, may be it has been stubbified
1472                     file = privGetFileName( actionIdentity, true, true, true);
1473                     if (!file.exists())
1474                         return null;
1475                     isStub = true;
1476                 }
1477             } catch (SecurityException se) {
1478                 throw StandardException.newException(
1479                     SQLState.DATA_UNEXPECTED_EXCEPTION, se);
1480             }
1481
1482             canUpdate = false;
1483             try {
1484                 if (!dataFactory.isReadOnly() && file.canWrite())
1485                     canUpdate = true;
1486             } catch (SecurityException se) {
1487                 // just means we can't write to it.
1488             }
1489
1490             try {
1491
1492                 fileData = file.getRandomAccessFile(canUpdate ? "rw" : "r");
1493                 fileData.seek(FIRST_ALLOC_PAGE_OFFSET);
1494                 readHeader(fileData);
1495
1496                 if (SanityManager.DEBUG)
1497                 {
1498                     if (isStub)
1499                         SanityManager.ASSERT(getDroppedState() && getCommittedDropState(),
1500                                              "a stub failed to set drop state");
1501                 }
1502
1503             } catch (IOException JavaDoc ioe) {
1504
1505                 if (isStub)
1506                 {
1507                     throw dataFactory.
1508                         markCorrupt(StandardException.
1509                                     newException(SQLState.
1510                                                  FILE_CONTAINER_EXCEPTION,
1511                                                  ioe, this));
1512                 }
1513
1514                 // maybe it is being stubbified... try that
1515
StorageFile stub =
1516                     privGetFileName(actionIdentity, true, true, true);
1517
1518                 if (stub.exists())
1519                 {
1520                     try
1521                     {
1522                         boolean delete_status = privRemoveFile(file);
1523                         if (SanityManager.DEBUG)
1524                         {
1525                             if (!delete_status)
1526                             {
1527                                 SanityManager.THROWASSERT(
1528                                     "delete of file (" + file + ") failed.");
1529                             }
1530                         }
1531
1532                         fileData =
1533                             stub.getRandomAccessFile(canUpdate ? "rw" : "r");
1534
1535                         readHeader(fileData);
1536                     }
1537                     catch (IOException JavaDoc ioe2)
1538                     {
1539                         throw dataFactory.
1540                             markCorrupt(StandardException.
1541                                         newException(SQLState.
1542                                                      FILE_CONTAINER_EXCEPTION,
1543                                                      ioe2, this));
1544                     }
1545
1546                     // RESOLVE: this is a temporary hack
1547

1548                 }
1549                 else
1550                     throw dataFactory.
1551                         markCorrupt(StandardException.
1552                                     newException(SQLState.
1553                                                  FILE_CONTAINER_EXCEPTION,
1554                                                  ioe, this));
1555             }
1556
1557             return this;
1558         } // end of case OPEN_CONTAINER_ACTION
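
        /*
         * A minimal, hypothetical sketch of the fallback performed above: the
         * open path tries the real container file first and, if it is gone,
         * falls back to the stub left behind by a committed drop. File names
         * and openAndReadHeader() are illustrative only and are not Derby's
         * API or naming convention.
         *
         *     java.io.File real = new java.io.File(dir, "container.dat");
         *     java.io.File stub = new java.io.File(dir, "container.stub");
         *     java.io.File toOpen = real.exists() ? real
         *                         : (stub.exists() ? stub : null);
         *     if (toOpen == null) {
         *         // container does not exist at all
         *     } else {
         *         openAndReadHeader(toOpen);  // hypothetical helper
         *     }
         */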

        case STUBBIFY_ACTION:
        {
            StorageFile file = privGetFileName(actionIdentity, false, false, true);
            StorageFile stub = privGetFileName(actionIdentity, true, false, false);

            StorageRandomAccessFile stubData = null;

            try
            {
                // !!!!!
                // bumpContainerVersion();
                //
                // do NOT bump the container version. We WANT the stubbify
                // operation to get redone every time. This is because this
                // operation first writes out the stub and then removes the
                // container file. If we bump the version, then the stub will
                // contain the new version. And if the system crashes right then,
                // we will skip the whole operation during redo even though
                // the container file may not have been removed. Since we don't
                // want to have the remove happen before the stub is written, we
                // cannot sync it and therefore cannot be sure the remove
                // happened before the system crashed.

                if (!stub.exists())
                {
                    // write the header to the stub
                    stubData = stub.getRandomAccessFile("rw");

                    writeRAFHeader(stubData,
                                   true, /* create */
                                   true); /* sync */

                    stubData.close();
                    stubData = null;
                }

                // Force WAL and check for database corruption before removing the file.
                // This is one operation where the container is changed on disk
                // directly without going thru the container cache, which otherwise
                // would have forced WAL. Take care of it here.
                dataFactory.flush(actionInstant);

                // try to remove the container file.
                // fileData is not null only if we are redoing a removeContainer
                // (stubbify) operation. Then fileData actually is opened against
                // the stub and the original container file does not exist.
                // We need to close it here because this method is called by
                // cache.remove and nobody will be able to see fileData after this.
                privRemoveFile(file);

            }
            catch (SecurityException se)
            {
                throw StandardException.
                    newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file,
                                 se.toString());
            }
            catch (IOException ioe)
            {
                // exception thrown while creating the stub. Remove the
                // (half-baked) stub.
                try
                {
                    if (stubData != null)
                    {
                        stubData.close();
                        stub.delete();
                        stubData = null;
                    }

                    if (fileData != null)
                    {
                        fileData.close();
                        fileData = null;
                    }
                }
                catch (IOException ioe2)
                {
                    throw StandardException.newException(
                        SQLState.FILE_CANNOT_REMOVE_FILE, ioe2, file, ioe.toString());
                }
                catch (SecurityException se)
                {
                    throw StandardException.newException(
                        SQLState.FILE_CANNOT_REMOVE_FILE, se, file, stub);
                }
            }

            // let the data factory know about the stub file; it can be removed
            // at the next checkpoint if it is not needed for recovery.
            dataFactory.stubFileToRemoveAfterCheckPoint(stub, actionInstant, getIdentity());
            return null;
        } // end of case STUBBIFY_ACTION
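
        /*
         * A minimal, hypothetical sketch of the ordering that STUBBIFY_ACTION
         * relies on: write (and sync) the stub first, force the write-ahead
         * log, and only then delete the container file, so a crash can never
         * leave a removed container without a recoverable stub. The helpers
         * writeStubHeader() and forceLog() are illustrative only and are not
         * Derby's API.
         *
         *     if (!stubFile.exists()) {
         *         writeStubHeader(stubFile);   // 1. durable stub first
         *     }
         *     forceLog();                      // 2. flush the write-ahead log
         *     containerFile.delete();          // 3. only now drop the data file
         */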

        case BACKUP_CONTAINER_ACTION: {
            privBackupContainer(actionContainerHandle, actionBackupLocation);
            return null;
        } // end of case BACKUP_CONTAINER_ACTION

        case GET_RANDOM_ACCESS_FILE_ACTION: {
            return actionFile.getRandomAccessFile("rw");
        } // end of case GET_RANDOM_ACCESS_FILE_ACTION

        } // end of switch

        return null;

    } // end of run

}
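
RAFContainer implements PrivilegedExceptionAction and dispatches on an internal actionCode inside run(), so that a single doPrivileged call site can cover create, open, remove, stubbify and backup. The following is a minimal, self-contained sketch of that dispatch pattern in isolation; the class name FileAction, its fields and its action codes are invented for illustration and are not part of Derby.

import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

// Hypothetical example of the "action code" dispatch style used above.
class FileAction implements PrivilegedExceptionAction<Object> {

    static final int CREATE_ACTION = 1;
    static final int REMOVE_ACTION = 2;

    private int actionCode;
    private java.io.File actionFile;

    Object doAction(int code, java.io.File file) throws Exception {
        actionCode = code;
        actionFile = file;
        try {
            // run() executes inside the privileged block
            return AccessController.doPrivileged(this);
        } catch (PrivilegedActionException pae) {
            throw pae.getException();
        }
    }

    public Object run() throws Exception {
        switch (actionCode) {
        case CREATE_ACTION:
            return Boolean.valueOf(actionFile.createNewFile());
        case REMOVE_ACTION:
            return Boolean.valueOf(actionFile.delete());
        }
        return null;
    }
}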