/**
 * com.mckoi.database.TableDataConglomerate  18 Nov 2000
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000, 2001, 2002  Diehl and Associates, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * Version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License Version 2 for more details.
 *
 * You should have received a copy of the GNU General Public License
 * Version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Change Log:
 *
 *
 */

package com.mckoi.database;

import java.io.*;
import java.util.Iterator;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import com.mckoi.util.IntegerListInterface;
import com.mckoi.util.IntegerIterator;
import com.mckoi.util.IntegerVector;
import com.mckoi.util.ByteArrayUtil;
import com.mckoi.util.UserTerminal;
import com.mckoi.util.BigNumber;
import com.mckoi.debug.*;

import com.mckoi.store.Store;
import com.mckoi.store.MutableArea;
import com.mckoi.store.Area;

import com.mckoi.database.StateStore.StateResource;

import com.mckoi.database.global.ByteLongObject;
import com.mckoi.database.global.ObjectTranslator;
import com.mckoi.database.global.Ref;

/**
 * A conglomerate of data that represents the contents of all tables in a
 * complete database.  This object handles all data persistence management
 * (storage, retrieval, removal) issues.  It is a transactional manager for
 * both data and indices in the database.
 *
 * @author Tobias Downer
 */

public class TableDataConglomerate {

  /**
   * The postfix on the name of the state file for the database store name.
   */
  public static final String STATE_POST = "_sf";

  // ---------- The standard constraint/schema tables ----------

  /**
   * The name of the system schema where persistent conglomerate state is
   * stored.
   */
  public static final String SYSTEM_SCHEMA = "SYS_INFO";

  /**
   * The schema info table.
   */
  public static final TableName SCHEMA_INFO_TABLE =
                             new TableName(SYSTEM_SCHEMA, "sUSRSchemaInfo");

  public static final TableName PERSISTENT_VAR_TABLE =
                           new TableName(SYSTEM_SCHEMA, "sUSRDatabaseVars");

  public static final TableName FOREIGN_COLS_TABLE =
                         new TableName(SYSTEM_SCHEMA, "sUSRForeignColumns");

  public static final TableName UNIQUE_COLS_TABLE =
                          new TableName(SYSTEM_SCHEMA, "sUSRUniqueColumns");

  public static final TableName PRIMARY_COLS_TABLE =
                         new TableName(SYSTEM_SCHEMA, "sUSRPrimaryColumns");

  public static final TableName CHECK_INFO_TABLE =
                              new TableName(SYSTEM_SCHEMA, "sUSRCheckInfo");

  public static final TableName UNIQUE_INFO_TABLE =
                             new TableName(SYSTEM_SCHEMA, "sUSRUniqueInfo");

  public static final TableName FOREIGN_INFO_TABLE =
                               new TableName(SYSTEM_SCHEMA, "sUSRFKeyInfo");

  public static final TableName PRIMARY_INFO_TABLE =
                               new TableName(SYSTEM_SCHEMA, "sUSRPKeyInfo");

  public static final TableName SYS_SEQUENCE_INFO =
                           new TableName(SYSTEM_SCHEMA, "sUSRSequenceInfo");

  public static final TableName SYS_SEQUENCE =
                               new TableName(SYSTEM_SCHEMA, "sUSRSequence");

  /**
   * The TransactionSystem that this Conglomerate is a child of.
   */
  private final TransactionSystem system;

  /**
   * The StoreSystem object used by this conglomerate to store the underlying
   * representation.
   */
  private final StoreSystem store_system;

  /**
   * The name given to this conglomerate.
   */
  private String name;

  /**
   * The actual store that backs the state store.
   */
  private Store act_state_store;

  /**
   * A store for the conglomerate state container.  This
   * file stores information persistently about the state of this object.
   */
  private StateStore state_store;

  /**
   * The current commit id for committed transactions.  Whenever transactional
   * changes are committed to the conglomerate, this id is incremented.
   */
  private long commit_id;


  /**
   * The list of all tables that are currently open in this conglomerate.
   * This includes tables that are not committed.
   */
  private ArrayList table_list;

  /**
   * The actual Store implementation that maintains the BlobStore information
   * for this conglomerate (if there is one).
   */
  private Store act_blob_store;

  /**
   * The BlobStore object for this conglomerate.
   */
  private BlobStore blob_store;

  /**
   * The SequenceManager object for this conglomerate.
   */
  private SequenceManager sequence_manager;

  /**
   * The list of transactions that are currently open over this conglomerate.
   * This list is ordered from lowest commit_id to highest.  This object is
   * shared with all the children MasterTableDataSource objects.
   */
  private OpenTransactionList open_transactions;

  /**
   * The list of all name space journals for the history of committed
   * transactions.
   */
  private ArrayList namespace_journal_list;

  // ---------- Table event listener ----------

  /**
   * All listeners for modification events on tables in this conglomerate.
   * This is a mapping from TableName -> ArrayList of listeners.
   */
  private final HashMap modification_listeners;



  // ---------- Locks ----------

  /**
   * This lock is obtained when we go to commit a change to the table.
   * Grabbing this lock ensures that no other commits can occur at the same
   * time on this conglomerate.
   */
  final Object commit_lock = new Object();

//  // ---------- Shutdown hook thread ----------
//
//  /**
//   * The ConglomerateShutdownHookThread object which we create when the
//   * conglomerate is opened, and remove when we close the conglomerate.
//   */
//  private ConglomerateShutdownHookThread shutdown_hook = null;


  /**
   * Constructs the conglomerate.
   */
  public TableDataConglomerate(TransactionSystem system,
                               StoreSystem store_system) {
    this.system = system;
    this.store_system = store_system;
    this.open_transactions = new OpenTransactionList(system);
    this.modification_listeners = new HashMap();
    this.namespace_journal_list = new ArrayList();

    this.sequence_manager = new SequenceManager(this);

  }

  /**
   * Returns the TransactionSystem that this conglomerate is part of.
   */
  public final TransactionSystem getSystem() {
    return system;
  }

  /**
   * Returns the StoreSystem used by this conglomerate to manage the
   * persistent state of the database.
   */
  public final StoreSystem storeSystem() {
    return store_system;
  }

  /**
   * Returns the SequenceManager object for this conglomerate.
   */
  final SequenceManager getSequenceManager() {
    return sequence_manager;
  }

  /**
   * Returns the BlobStore for this conglomerate.
   */
  final BlobStore getBlobStore() {
    return blob_store;
  }

  /**
   * Returns the DebugLogger object that we use to log debug messages to.
   */
  public final DebugLogger Debug() {
    return getSystem().Debug();
  }

  /**
   * Returns the name given to this conglomerate.
   */
  String getName() {
    return name;
  }

  // ---------- Conglomerate state methods ----------

  /**
   * Marks the given table id as committed dropped.
   */
  private void markAsCommittedDropped(int table_id) {
    MasterTableDataSource master_table = getMasterTable(table_id);
    state_store.addDeleteResource(
            new StateResource(table_id, createEncodedTableFile(master_table)));
  }

  /**
   * Loads the master table given the table_id and the name of the table
   * resource in the database path.  The table_string is a specially formatted
   * string that we parse to determine the file structure of the table.
   */
  private MasterTableDataSource loadMasterTable(int table_id,
                        String table_str, int table_type) throws IOException {

    // Open the table
    if (table_type == 1) {
      V1MasterTableDataSource master =
          new V1MasterTableDataSource(getSystem(),
                             storeSystem(), open_transactions);
      if (master.exists(table_str)) {
        return master;
      }
    }
    else if (table_type == 2) {
      V2MasterTableDataSource master =
          new V2MasterTableDataSource(getSystem(),
                 storeSystem(), open_transactions, blob_store);
      if (master.exists(table_str)) {
        return master;
      }
    }

    // If not exists, then generate an error message
    Debug().write(Lvl.ERROR, this,
                  "Couldn't find table source - resource name: " +
                  table_str + " table_id: " + table_id);

    return null;
  }

  /**
   * Returns a string that is an encoded table file name.  An encoded table
   * file name includes information about the table type with the name of the
   * table.  For example, ":1ThisTable" represents a V1MasterTableDataSource
   * table with file name "ThisTable".
   */
  private static String createEncodedTableFile(MasterTableDataSource table) {
    char type;
    if (table instanceof V1MasterTableDataSource) {
      type = '1';
    }
    else if (table instanceof V2MasterTableDataSource) {
      type = '2';
    }
    else {
      throw new RuntimeException("Unrecognised MasterTableDataSource class.");
    }
    StringBuffer buf = new StringBuffer();
    buf.append(':');
    buf.append(type);
    buf.append(table.getSourceIdent());
    return new String(buf);
  }
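
  // Illustrative sketch (not part of the original source) of how an encoded
  // table file name produced by createEncodedTableFile() is decoded again;
  // the same parse appears in readVisibleTables() and readDroppedTables()
  // below.  ":2mighty.koi" is the example encoding used in the Javadoc of
  // closeAndDropTable() later in this class.
  //
  //   String encoded = ":2mighty.koi";              // V2 table resource
  //   int table_type = (encoded.charAt(1) == '1') ? 1 : 2;
  //   String file_name = encoded.substring(2);      // -> "mighty.koi"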

  /**
   * Reads in the list of committed tables in this conglomerate.  This should
   * only be called during an 'open' like method.  This method fills the
   * 'committed_tables' and 'table_list' lists with the tables in this
   * conglomerate.
   */
  private void readVisibleTables() throws IOException {

    // The list of all visible tables from the state file
    StateResource[] tables = state_store.getVisibleList();
    // For each visible table
    for (int i = 0; i < tables.length; ++i) {
      StateResource resource = tables[i];

      int master_table_id = (int) resource.table_id;
      String file_name = resource.name;

      // Parse the file name string and determine the table type.
      int table_type = 1;
      if (file_name.startsWith(":")) {
        if (file_name.charAt(1) == '1') {
          table_type = 1;
        }
        else if (file_name.charAt(1) == '2') {
          table_type = 2;
        }
        else {
          throw new RuntimeException("Table type is not known.");
        }
        file_name = file_name.substring(2);
      }

      // Load the master table from the resource information
      MasterTableDataSource master =
                       loadMasterTable(master_table_id, file_name, table_type);

      if (master == null) {
        throw new Error("Table file for " + file_name + " was not found.");
      }

      if (master instanceof V1MasterTableDataSource) {
        V1MasterTableDataSource v1_master = (V1MasterTableDataSource) master;
        v1_master.open(file_name);
      }
      else if (master instanceof V2MasterTableDataSource) {
        V2MasterTableDataSource v2_master = (V2MasterTableDataSource) master;
        v2_master.open(file_name);
      }
      else {
        throw new Error("Unknown master table type: " + master.getClass());
      }

      // Add the table to the table list
      table_list.add(master);

    }

  }

  /**
   * Checks the list of committed tables in this conglomerate.  This should
   * only be called during a 'check' like method.  This method fills the
   * 'committed_tables' and 'table_list' lists with the tables in this
   * conglomerate.
   */
  public void checkVisibleTables(UserTerminal terminal) throws IOException {

    // The list of all visible tables from the state file
    StateResource[] tables = state_store.getVisibleList();
    // For each visible table
    for (int i = 0; i < tables.length; ++i) {
      StateResource resource = tables[i];

      int master_table_id = (int) resource.table_id;
      String file_name = resource.name;

      // Parse the file name string and determine the table type.
      int table_type = 1;
      if (file_name.startsWith(":")) {
        if (file_name.charAt(1) == '1') {
          table_type = 1;
        }
        else if (file_name.charAt(1) == '2') {
          table_type = 2;
        }
        else {
          throw new RuntimeException("Table type is not known.");
        }
        file_name = file_name.substring(2);
      }

      // Load the master table from the resource information
      MasterTableDataSource master =
                       loadMasterTable(master_table_id, file_name, table_type);

      if (master instanceof V1MasterTableDataSource) {
        V1MasterTableDataSource v1_master = (V1MasterTableDataSource) master;
        v1_master.checkAndRepair(file_name, terminal);
      }
      else if (master instanceof V2MasterTableDataSource) {
        V2MasterTableDataSource v2_master = (V2MasterTableDataSource) master;
        v2_master.checkAndRepair(file_name, terminal);
      }
      else {
        throw new Error("Unknown master table type: " + master.getClass());
      }

      // Add the table to the table list
      table_list.add(master);

      // Set a check point
      store_system.setCheckPoint();

    }

  }




  /**
   * Reads in the list of committed dropped tables on this conglomerate.  This
   * should only be called during an 'open' like method.  This method fills
   * the 'committed_dropped' and 'table_list' lists with the tables in this
   * conglomerate.
   */
  private void readDroppedTables() throws IOException {

    // The list of all dropped tables from the state file
    StateResource[] tables = state_store.getDeleteList();
    // For each dropped table
    for (int i = 0; i < tables.length; ++i) {
      StateResource resource = tables[i];

      int master_table_id = (int) resource.table_id;
      String file_name = resource.name;

      // Parse the file name string and determine the table type.
      int table_type = 1;
      if (file_name.startsWith(":")) {
        if (file_name.charAt(1) == '1') {
          table_type = 1;
        }
        else if (file_name.charAt(1) == '2') {
          table_type = 2;
        }
        else {
          throw new RuntimeException("Table type is not known.");
        }
        file_name = file_name.substring(2);
      }

      // Load the master table from the resource information
      MasterTableDataSource master =
                       loadMasterTable(master_table_id, file_name, table_type);

      // File wasn't found so remove from the delete resources
      if (master == null) {
        state_store.removeDeleteResource(resource.name);
      }
      else {
        if (master instanceof V1MasterTableDataSource) {
          V1MasterTableDataSource v1_master = (V1MasterTableDataSource) master;
          v1_master.open(file_name);
        }
        else if (master instanceof V2MasterTableDataSource) {
          V2MasterTableDataSource v2_master = (V2MasterTableDataSource) master;
          v2_master.open(file_name);
        }
        else {
          throw new Error("Unknown master table type: " + master.getClass());
        }

        // Add the table to the table list
        table_list.add(master);
      }

    }

    // Commit any changes to the state store
    state_store.commit();

  }

  /**
   * Create the system tables that must be present in a conglomerate.  These
   * tables consist of constraint and table management data.
   * <p>
   * <pre>
   * sUSRPKeyInfo - Primary key constraint information.
   * sUSRFKeyInfo - Foreign key constraint information.
   * sUSRUniqueInfo - Unique set constraint information.
   * sUSRCheckInfo - Check constraint information.
   * sUSRPrimaryColumns - Primary columns information (refers to PKeyInfo)
   * sUSRUniqueColumns - Unique columns information (refers to UniqueInfo)
   * sUSRForeignColumns1 - Foreign column information (refers to FKeyInfo)
   * sUSRForeignColumns2 - Secondary Foreign column information (refers to
   *   FKeyInfo).
   * </pre>
   * These tables handle data for referential integrity.  There are also some
   * additional tables containing general table information.
   * <pre>
   * sUSRTableColumnInfo - All table and column information.
   * </pre>
   * The design is fairly elegant in that we are using the database to store
   * information to maintain referential integrity.
   * <p><pre>
   * The schema layout for these tables;
   *
   *  CREATE TABLE sUSRPKeyInfo (
   *    id          NUMERIC NOT NULL,
   *    name        TEXT NOT NULL,   // The name of the primary key constraint
   *    schema      TEXT NOT NULL,   // The name of the schema
   *    table       TEXT NOT NULL,   // The name of the table
   *    deferred    BIT NOT NULL,    // Whether deferred or immediate
   *    PRIMARY KEY (id),
   *    UNIQUE (schema, table)
   *  );
   *  CREATE TABLE sUSRFKeyInfo (
   *    id          NUMERIC NOT NULL,
   *    name        TEXT NOT NULL,   // The name of the foreign key constraint
   *    schema      TEXT NOT NULL,   // The name of the schema
   *    table       TEXT NOT NULL,   // The name of the table
   *    ref_schema  TEXT NOT NULL,   // The name of the schema referenced
   *    ref_table   TEXT NOT NULL,   // The name of the table referenced
   *    update_rule TEXT NOT NULL,   // The rule for updating to table
   *    delete_rule TEXT NOT NULL,   // The rule for deleting from table
   *    deferred    BIT NOT NULL,    // Whether deferred or immediate
   *    PRIMARY KEY (id)
   *  );
   *  CREATE TABLE sUSRUniqueInfo (
   *    id          NUMERIC NOT NULL,
   *    name        TEXT NOT NULL,   // The name of the unique constraint
   *    schema      TEXT NOT NULL,   // The name of the schema
   *    table       TEXT NOT NULL,   // The name of the table
   *    deferred    BIT NOT NULL,    // Whether deferred or immediate
   *    PRIMARY KEY (id)
   *  );
   *  CREATE TABLE sUSRCheckInfo (
   *    id          NUMERIC NOT NULL,
   *    name        TEXT NOT NULL,   // The name of the check constraint
   *    schema      TEXT NOT NULL,   // The name of the schema
   *    table       TEXT NOT NULL,   // The name of the table
   *    expression  TEXT NOT NULL,   // The check expression
   *    deferred    BIT NOT NULL,    // Whether deferred or immediate
   *    PRIMARY KEY (id)
   *  );
   *  CREATE TABLE sUSRPrimaryColumns (
   *    pk_id       NUMERIC NOT NULL,  // The primary key constraint id
   *    column      TEXT NOT NULL,     // The name of the primary
   *    seq_no      INTEGER NOT NULL,  // The sequence number of this constraint
   *    FOREIGN KEY pk_id REFERENCES sUSRPKeyInfo
   *  );
   *  CREATE TABLE sUSRUniqueColumns (
   *    un_id       NUMERIC NOT NULL,  // The unique constraint id
   *    column      TEXT NOT NULL,     // The column that is unique
   *    seq_no      INTEGER NOT NULL,  // The sequence number of this constraint
   *    FOREIGN KEY un_id REFERENCES sUSRUniqueInfo
   *  );
   *  CREATE TABLE sUSRForeignColumns (
   *    fk_id       NUMERIC NOT NULL,  // The foreign key constraint id
   *    fcolumn     TEXT NOT NULL,     // The column in the foreign key
   *    pcolumn     TEXT NOT NULL,     // The column in the primary key
   *                                   //   (referenced)
   *    seq_no      INTEGER NOT NULL,  // The sequence number of this constraint
   *    FOREIGN KEY fk_id REFERENCES sUSRFKeyInfo
   *  );
   *  CREATE TABLE sUSRSchemaInfo (
   *    id          NUMERIC NOT NULL,
   *    name        TEXT NOT NULL,
   *    type        TEXT,              // Schema type (system, etc)
   *    other       TEXT,
   *
   *    UNIQUE ( name )
   *  );
   *  CREATE TABLE sUSRTableInfo (
   *    id          NUMERIC NOT NULL,
   *    name        TEXT NOT NULL,     // The name of the table
   *    schema      TEXT NOT NULL,     // The name of the schema of this table
   *    type        TEXT,              // Table type (temporary, system, etc)
   *    other       TEXT,              // Notes, etc
   *
   *    UNIQUE ( name )
   *  );
   *  CREATE TABLE sUSRColumnColumns (
   *    t_id        NUMERIC NOT NULL,  // Foreign key to sUSRTableInfo
   *    column      TEXT NOT NULL,     // The column name
   *    seq_no      INTEGER NOT NULL,  // The sequence in the table
   *    type        TEXT NOT NULL,     // The SQL type of this column
   *    size        NUMERIC,           // The size of the column if applicable
   *    scale       NUMERIC,           // The scale of the column if applicable
   *    default     TEXT NOT NULL,     // The default expression
   *    constraints TEXT NOT NULL,     // The constraints of this column
   *    other       TEXT,              // Notes, etc
   *
   *    FOREIGN KEY t_id REFERENCES sUSRTableInfo,
   *    UNIQUE ( t_id, column )
   *  );
   *
   * </pre>
   */
  void updateSystemTableSchema() {
    // Create the transaction
    Transaction transaction = createTransaction();

    DataTableDef table;

    table = new DataTableDef();
    table.setTableName(SYS_SEQUENCE_INFO);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createNumericColumn("type"));
    transaction.alterCreateTable(table, 187, 128);

    table = new DataTableDef();
    table.setTableName(SYS_SEQUENCE);
    table.addColumn(DataTableColumnDef.createNumericColumn("seq_id"));
    table.addColumn(DataTableColumnDef.createNumericColumn("last_value"));
    table.addColumn(DataTableColumnDef.createNumericColumn("increment"));
    table.addColumn(DataTableColumnDef.createNumericColumn("minvalue"));
    table.addColumn(DataTableColumnDef.createNumericColumn("maxvalue"));
    table.addColumn(DataTableColumnDef.createNumericColumn("start"));
    table.addColumn(DataTableColumnDef.createNumericColumn("cache"));
    table.addColumn(DataTableColumnDef.createBooleanColumn("cycle"));
    transaction.alterCreateTable(table, 187, 128);

    table = new DataTableDef();
    table.setTableName(PRIMARY_INFO_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createStringColumn("schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("table"));
    table.addColumn(DataTableColumnDef.createNumericColumn("deferred"));
    transaction.alterCreateTable(table, 187, 128);

    table = new DataTableDef();
    table.setTableName(FOREIGN_INFO_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createStringColumn("schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("table"));
    table.addColumn(DataTableColumnDef.createStringColumn("ref_schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("ref_table"));
    table.addColumn(DataTableColumnDef.createStringColumn("update_rule"));
    table.addColumn(DataTableColumnDef.createStringColumn("delete_rule"));
    table.addColumn(DataTableColumnDef.createNumericColumn("deferred"));
    transaction.alterCreateTable(table, 187, 128);

    table = new DataTableDef();
    table.setTableName(UNIQUE_INFO_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createStringColumn("schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("table"));
    table.addColumn(DataTableColumnDef.createNumericColumn("deferred"));
    transaction.alterCreateTable(table, 187, 128);

    table = new DataTableDef();
    table.setTableName(CHECK_INFO_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createStringColumn("schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("table"));
    table.addColumn(DataTableColumnDef.createStringColumn("expression"));
    table.addColumn(DataTableColumnDef.createNumericColumn("deferred"));
    table.addColumn(
            DataTableColumnDef.createBinaryColumn("serialized_expression"));
    transaction.alterCreateTable(table, 187, 128);

    table = new DataTableDef();
    table.setTableName(PRIMARY_COLS_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("pk_id"));
    table.addColumn(DataTableColumnDef.createStringColumn("column"));
    table.addColumn(DataTableColumnDef.createNumericColumn("seq_no"));
    transaction.alterCreateTable(table, 91, 128);

    table = new DataTableDef();
    table.setTableName(UNIQUE_COLS_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("un_id"));
    table.addColumn(DataTableColumnDef.createStringColumn("column"));
    table.addColumn(DataTableColumnDef.createNumericColumn("seq_no"));
    transaction.alterCreateTable(table, 91, 128);

    table = new DataTableDef();
    table.setTableName(FOREIGN_COLS_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("fk_id"));
    table.addColumn(DataTableColumnDef.createStringColumn("fcolumn"));
    table.addColumn(DataTableColumnDef.createStringColumn("pcolumn"));
    table.addColumn(DataTableColumnDef.createNumericColumn("seq_no"));
    transaction.alterCreateTable(table, 91, 128);

    table = new DataTableDef();
    table.setTableName(SCHEMA_INFO_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createStringColumn("type"));
    table.addColumn(DataTableColumnDef.createStringColumn("other"));
    transaction.alterCreateTable(table, 91, 128);

    // Stores misc variables of the database,
    table = new DataTableDef();
    table.setTableName(PERSISTENT_VAR_TABLE);
    table.addColumn(DataTableColumnDef.createStringColumn("variable"));
    table.addColumn(DataTableColumnDef.createStringColumn("value"));
    transaction.alterCreateTable(table, 91, 128);

    // Commit and close the transaction.
    try {
      transaction.closeAndCommit();
    }
    catch (TransactionException e) {
      Debug().writeException(e);
      throw new Error("Transaction Exception creating conglomerate.");
    }

  }

  /**
   * Given a table with an 'id' field, this ensures that the sequence
   * value for the table is greater than the maximum id in the column.
   */
  void resetTableID(TableName tname) {
    // Create the transaction
    Transaction transaction = createTransaction();
    // Get the table
    MutableTableDataSource table = transaction.getTable(tname);
    // Find the index of the column name called 'id'
    DataTableDef table_def = table.getDataTableDef();
    int col_index = table_def.findColumnName("id");
    if (col_index == -1) {
      throw new Error("Column name 'id' not found.");
    }
    // Find the maximum 'id' value.
    SelectableScheme scheme = table.getColumnScheme(col_index);
    IntegerVector ivec = scheme.selectLast();
    if (ivec.size() > 0) {
      TObject ob = table.getCellContents(col_index, ivec.intAt(0));
      BigNumber b_num = ob.toBigNumber();
      if (b_num != null) {
        // Set the unique id to +1 the maximum id value in the column
        transaction.setUniqueID(tname, b_num.longValue() + 1L);
      }
    }

    // Commit and close the transaction.
    try {
      transaction.closeAndCommit();
    }
    catch (TransactionException e) {
      Debug().writeException(e);
      throw new Error("Transaction Exception creating conglomerate.");
    }
  }

  /**
   * Resets the table sequence id for all the system tables managed by the
   * conglomerate.
   */
  void resetAllSystemTableID() {
    resetTableID(PRIMARY_INFO_TABLE);
    resetTableID(FOREIGN_INFO_TABLE);
    resetTableID(UNIQUE_INFO_TABLE);
    resetTableID(CHECK_INFO_TABLE);
    resetTableID(SCHEMA_INFO_TABLE);
  }

  /**
   * Populates the system table schema with initial data for an empty
   * conglomerate.  This sets up the standard variables and table
   * constraint data.
   */
  private void initializeSystemTableSchema() {
    // Create the transaction
    Transaction transaction = createTransaction();

    // Insert the two default schema names,
    transaction.createSchema(SYSTEM_SCHEMA, "SYSTEM");

    // -- Primary Keys --
    // The 'id' columns are primary keys on all the system tables,
    final String[] id_col = new String[] { "id" };
    transaction.addPrimaryKeyConstraint(PRIMARY_INFO_TABLE,
              id_col, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_PK_PK");
    transaction.addPrimaryKeyConstraint(FOREIGN_INFO_TABLE,
              id_col, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_FK_PK");
    transaction.addPrimaryKeyConstraint(UNIQUE_INFO_TABLE,
              id_col, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_UNIQUE_PK");
    transaction.addPrimaryKeyConstraint(CHECK_INFO_TABLE,
              id_col, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_CHECK_PK");
    transaction.addPrimaryKeyConstraint(SCHEMA_INFO_TABLE,
              id_col, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_SCHEMA_PK");

    // -- Foreign Keys --
    // Create the foreign key references,
    final String[] fk_col = new String[1];
    final String[] fk_ref_col = new String[] { "id" };
    fk_col[0] = "pk_id";
    transaction.addForeignKeyConstraint(
              PRIMARY_COLS_TABLE, fk_col, PRIMARY_INFO_TABLE, fk_ref_col,
              Transaction.NO_ACTION, Transaction.NO_ACTION,
              Transaction.INITIALLY_IMMEDIATE, "SYSTEM_PK_FK");
    fk_col[0] = "fk_id";
    transaction.addForeignKeyConstraint(
              FOREIGN_COLS_TABLE, fk_col, FOREIGN_INFO_TABLE, fk_ref_col,
              Transaction.NO_ACTION, Transaction.NO_ACTION,
              Transaction.INITIALLY_IMMEDIATE, "SYSTEM_FK_FK");
    fk_col[0] = "un_id";
    transaction.addForeignKeyConstraint(
              UNIQUE_COLS_TABLE, fk_col, UNIQUE_INFO_TABLE, fk_ref_col,
              Transaction.NO_ACTION, Transaction.NO_ACTION,
              Transaction.INITIALLY_IMMEDIATE, "SYSTEM_UNIQUE_FK");

    // sUSRPKeyInfo 'schema', 'table' column is a unique set,
    // (You are only allowed one primary key per table).
    String[] columns = new String[] { "schema", "table" };
    transaction.addUniqueConstraint(PRIMARY_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_PKEY_ST_UNIQUE");
    // sUSRSchemaInfo 'name' column is a unique column,
    columns = new String[] { "name" };
    transaction.addUniqueConstraint(SCHEMA_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_SCHEMA_UNIQUE");
//    columns = new String[] { "name" };
    columns = new String[] { "name", "schema" };
    // sUSRPKeyInfo 'name' column is a unique column,
    transaction.addUniqueConstraint(PRIMARY_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_PKEY_UNIQUE");
    // sUSRFKeyInfo 'name' column is a unique column,
    transaction.addUniqueConstraint(FOREIGN_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_FKEY_UNIQUE");
    // sUSRUniqueInfo 'name' column is a unique column,
    transaction.addUniqueConstraint(UNIQUE_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_UNIQUE_UNIQUE");
    // sUSRCheckInfo 'name' column is a unique column,
    transaction.addUniqueConstraint(CHECK_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_CHECK_UNIQUE");

    // sUSRDatabaseVars 'variable' is unique
    columns = new String[] { "variable" };
    transaction.addUniqueConstraint(PERSISTENT_VAR_TABLE,
       columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_DATABASEVARS_UNIQUE");

    // Insert the version number of the database
    transaction.setPersistentVar("database.version", "1.4");

    // Commit and close the transaction.
    try {
      transaction.closeAndCommit();
    }
    catch (TransactionException e) {
      Debug().writeException(e);
      throw new Error("Transaction Exception initializing conglomerate.");
    }

  }

  /**
   * Initializes the BlobStore.  If the BlobStore doesn't exist it will be
   * created, and if it does exist it will be initialized.
   */
  private void initializeBlobStore() throws IOException {

    // Does the file already exist?
    boolean blob_store_exists = storeSystem().storeExists("BlobStore");
    // If the blob store doesn't exist and we are read_only, we can't do
    // anything further so simply return.
    if (!blob_store_exists && isReadOnly()) {
      return;
    }

    // The blob store,
    if (blob_store_exists) {
      act_blob_store = storeSystem().openStore("BlobStore");
    }
    else {
      act_blob_store = storeSystem().createStore("BlobStore");
    }

    try {
      act_blob_store.lockForWrite();

      // Create the BlobStore object
      blob_store = new BlobStore(act_blob_store);

      // Get the 64 byte fixed area
      MutableArea fixed_area = act_blob_store.getMutableArea(-1);
      // If the blob store didn't exist then we need to create it here,
      if (!blob_store_exists) {
        long header_p = blob_store.create();
        fixed_area.putLong(header_p);
        fixed_area.checkOut();
      }
      else {
        // Otherwise we need to initialize the blob store
        long header_p = fixed_area.getLong();
        blob_store.init(header_p);
      }
    }
    finally {
      act_blob_store.unlockForWrite();
    }

  }




  // ---------- Private methods ----------

  /**
   * Returns true if the system is in read only mode.
   */
  private boolean isReadOnly() {
    return system.readOnlyAccess();
  }

  /**
   * Returns the path of the database.
   */
  private File getPath() {
    return system.getDatabasePath();
  }

  /**
   * Returns the next unique table_id value for a new table and updates the
   * conglomerate state information as appropriate.
   */
  private int nextUniqueTableID() throws IOException {
    return state_store.nextTableID();
  }


  /**
   * Sets up the internal state of this object.
   */
  private void setupInternal() {
    commit_id = 0;
    table_list = new ArrayList();

//    // If the VM supports shutdown hook,
//    try {
//      shutdown_hook = new ConglomerateShutdownHookThread();
//      Runtime.getRuntime().addShutdownHook(shutdown_hook);
//    }
//    catch (Throwable e) {
//      // Catch instantiation/access errors
//      system.Debug().write(Lvl.MESSAGE, this,
//                           "Unable to register shutdown hook.");
//    }

  }

  // ---------- Public methods ----------

  /**
   * Minimally creates a new conglomerate but does NOT initialize any of the
   * system tables.  This is a useful feature for a copy function that requires
   * a TableDataConglomerate object to copy data into but does not require any
   * initial system tables (because this information is copied from the source
   * conglomerate).
   */
  void minimalCreate(String name) throws IOException {
    this.name = name;

    if (exists(name)) {
      throw new IOException("Conglomerate already exists: " + name);
    }

    // Lock the store system (generates an IOException if exclusive lock
    // can not be made).
    if (!isReadOnly()) {
      storeSystem().lock(name);
    }

    // Create/Open the state store
    act_state_store = storeSystem().createStore(name + STATE_POST);
    try {
      act_state_store.lockForWrite();

      state_store = new StateStore(act_state_store);
      long head_p = state_store.create();
      // Get the fixed area
      MutableArea fixed_area = act_state_store.getMutableArea(-1);
      fixed_area.putLong(head_p);
      fixed_area.checkOut();
    }
    finally {
      act_state_store.unlockForWrite();
    }

    setupInternal();

    // Init the conglomerate blob store
    initializeBlobStore();

    // Create the system tables (but don't initialize)
    updateSystemTableSchema();

  }

  /**
   * Creates a new conglomerate at the given path in the file system.  This
   * must be an empty directory where files can be stored.  This will create
   * the conglomerate and exit in an open (read/write) state.
   */
  public void create(String name) throws IOException {
    minimalCreate(name);

    // Initialize the conglomerate system tables.
    initializeSystemTableSchema();

    // Commit the state
    state_store.commit();

  }

  /**
   * Opens a conglomerate.  If the conglomerate does not exist then an
   * IOException is generated.  Once a conglomerate is open, we may start
   * opening transactions and altering the data within it.
   */
  public void open(String name) throws IOException {
    this.name = name;

    if (!exists(name)) {
      throw new IOException("Conglomerate doesn't exists: " + name);
    }

    // Check the file lock
    if (!isReadOnly()) {
      // Obtain the lock (generate error if this is not possible)
      storeSystem().lock(name);
    }

    // Open the state store
    act_state_store = storeSystem().openStore(name + STATE_POST);
    state_store = new StateStore(act_state_store);
    // Get the fixed 64 byte area.
    Area fixed_area = act_state_store.getArea(-1);
    long head_p = fixed_area.getLong();
    state_store.init(head_p);

    setupInternal();

    // Init the conglomerate blob store
    initializeBlobStore();

    readVisibleTables();
    readDroppedTables();

    // We possibly have things to clean up if there are deleted columns.
    cleanUpConglomerate();

  }

  /**
   * Closes this conglomerate.  The conglomerate must be open for it to be
   * closed.  When closed, any use of this object is undefined.
   */
  public void close() throws IOException {
    synchronized (commit_lock) {

      // We possibly have things to clean up.
      cleanUpConglomerate();

      // Set a check point
      store_system.setCheckPoint();

      // Go through and close all the committed tables.
      int size = table_list.size();
      for (int i = 0; i < size; ++i) {
        MasterTableDataSource master =
                                    (MasterTableDataSource) table_list.get(i);
        master.dispose(false);
      }

      state_store.commit();
      storeSystem().closeStore(act_state_store);

      table_list = null;

    }

    // Unlock the storage system
    storeSystem().unlock(name);

    if (blob_store != null) {
      storeSystem().closeStore(act_blob_store);
    }

//    removeShutdownHook();
  }
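
  // Illustrative usage sketch (not part of the original source): the typical
  // lifecycle of a conglomerate, assuming a TransactionSystem and StoreSystem
  // have already been set up elsewhere ('system' and 'store_system' below are
  // those assumed objects, and "DefaultDatabase" is a name chosen purely for
  // illustration).
  //
  //   TableDataConglomerate conglomerate =
  //                       new TableDataConglomerate(system, store_system);
  //   if (conglomerate.exists("DefaultDatabase")) {
  //     conglomerate.open("DefaultDatabase");    // open an existing database
  //   }
  //   else {
  //     conglomerate.create("DefaultDatabase");  // create and initialize a new one
  //   }
  //   // ... create transactions, read/modify data ...
  //   conglomerate.close();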

//  /**
//   * Removes the shutdown hook.
//   */
//  private void removeShutdownHook() {
//    // If the VM supports shutdown hook, remove it,
//    try {
//      if (shutdown_hook != null) {
////        System.out.println("REMOVING: " + this);
//        Runtime.getRuntime().removeShutdownHook(shutdown_hook);
//        // We have to start it otherwise the ThreadGroup won't remove its
//        // reference to it and it causes GC problems.
//        shutdown_hook.start();
//        shutdown_hook.waitUntilComplete();
//        shutdown_hook = null;
//      }
//    }
//    catch (Throwable e) {
//      // Catch (and ignore) instantiation/access errors
//    }
//  }

  /**
   * Deletes and closes the conglomerate.  This will delete all the files in
   * the file system associated with this conglomerate, so this method should
   * be used with care.
   * <p>
   * WARNING: Will result in total loss of all data stored in the conglomerate.
   */
  public void delete() throws IOException {
    synchronized (commit_lock) {

      // We possibly have things to clean up.
      cleanUpConglomerate();

      // Go through and delete and close all the committed tables.
      int size = table_list.size();
      for (int i = 0; i < size; ++i) {
        MasterTableDataSource master =
                                    (MasterTableDataSource) table_list.get(i);
        master.drop();
      }

      // Delete the state file
      state_store.commit();
      storeSystem().closeStore(act_state_store);
      storeSystem().deleteStore(act_state_store);

      // Delete the blob store
      if (blob_store != null) {
        storeSystem().closeStore(act_blob_store);
        storeSystem().deleteStore(act_blob_store);
      }

      // Invalidate this object
      table_list = null;

    }

    // Unlock the storage system.
    storeSystem().unlock(name);
  }

  /**
   * Returns true if the conglomerate is closed.
   */
  public boolean isClosed() {
    synchronized (commit_lock) {
      return table_list == null;
    }
  }


  /**
   * Returns true if the conglomerate exists in the file system and can
   * be opened.
   */
  public boolean exists(String name) throws IOException {
    return storeSystem().storeExists(name + STATE_POST);
  }

  /**
   * Makes a complete copy of this database to the position represented by the
   * given TableDataConglomerate object.  The given TableDataConglomerate
   * object must NOT be being used by another database running in the JVM.
   * This may take a while to complete.  The backup operation occurs within its
   * own transaction and the copy transaction is read-only meaning there is no
   * way for the copy process to interfere with other transactions running
   * concurrently.
   * <p>
   * The conglomerate must be open before this method is called.
   */
  public void liveCopyTo(TableDataConglomerate dest_conglomerate)
                                                           throws IOException {

    // The destination store system
    StoreSystem dest_store_system = dest_conglomerate.storeSystem();

    // Copy all the blob data from the given blob store to the current blob
    // store.
    dest_conglomerate.blob_store.copyFrom(dest_store_system, blob_store);

    // Open new transaction - this is the current view we are going to copy.
    Transaction transaction = createTransaction();

    try {

      // Copy the data in this transaction to the given destination store system.
      transaction.liveCopyAllDataTo(dest_conglomerate);

    }
    finally {
      // Make sure we close the transaction
      try {
        transaction.closeAndCommit();
      }
      catch (TransactionException e) {
        throw new RuntimeException("Transaction Error: " + e.getMessage());
      }
    }

    // Finished - increment the live copies counter.
    getSystem().stats().increment("TableDataConglomerate.liveCopies");

  }

  // ---------- Diagnostic and repair ----------

  /**
   * Returns a RawDiagnosticTable object that is used for diagnostics of the
   * table with the given file name.
   */
  public RawDiagnosticTable getDiagnosticTable(String table_file_name) {
    synchronized (commit_lock) {
      for (int i = 0; i < table_list.size(); ++i) {
        MasterTableDataSource master =
                                    (MasterTableDataSource) table_list.get(i);
        if (master.getSourceIdent().equals(table_file_name)) {
          return master.getRawDiagnosticTable();
        }
      }
    }
    return null;
  }

  /**
   * Returns the list of file names for all tables in this conglomerate.
   */
  public String[] getAllTableFileNames() {
    synchronized (commit_lock) {
      String[] list = new String[table_list.size()];
      for (int i = 0; i < table_list.size(); ++i) {
        MasterTableDataSource master =
                                    (MasterTableDataSource) table_list.get(i);
        list[i] = master.getSourceIdent();
      }
      return list;
    }
  }

  // ---------- Conglomerate event notification ----------

  /**
   * Adds a listener for transactional modification events that occur on the
   * given table in this conglomerate.  A transactional modification event is
   * an event fired immediately upon the modification of a table by a
   * transaction, either immediately before the modification or immediately
   * after.  Also an event is fired when a modification to a table is
   * successfully committed.
   * <p>
   * The BEFORE_* type triggers are given the opportunity to modify the
   * contents of the RowData before the update or insert occurs.  All triggers
   * may generate an exception which will cause the transaction to rollback.
   * <p>
   * The event carries with it the event type, the transaction that the event
   * occurred in, and any information regarding the modification itself.
   * <p>
   * This event/listener mechanism is intended to be used to implement higher
   * layer database triggering systems.  Note that care must be taken with
   * the commit level events because they occur inside a commit lock on this
   * conglomerate and so synchronization and deadlock issues need to be
   * carefully considered.
   * <p>
   * NOTE: A listener on the given table will be notified of ALL table
   * modification events by all transactions at the time they happen.
   *
   * @param table_name the name of the table in the conglomerate to listen for
   *   events from.
   * @param listener the listener to be notified of events.
   */
  public void addTransactionModificationListener(TableName table_name,
                                   TransactionModificationListener listener) {
    synchronized (modification_listeners) {
      ArrayList list = (ArrayList) modification_listeners.get(table_name);
      if (list == null) {
        // If the mapping doesn't exist then create the list for the table
        // here.
        list = new ArrayList();
        modification_listeners.put(table_name, list);
      }

      list.add(listener);
    }
  }
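
  // Illustrative sketch (not part of the original source): registering a
  // listener for modification events on a hypothetical table "APP.Orders".
  // 'conglomerate' is an open TableDataConglomerate and MyListener stands in
  // for any TransactionModificationListener implementation supplied by the
  // caller.
  //
  //   TableName orders = new TableName("APP", "Orders");
  //   TransactionModificationListener listener = new MyListener();
  //   conglomerate.addTransactionModificationListener(orders, listener);
  //   // ... later, when events are no longer needed:
  //   conglomerate.removeTransactionModificationListener(orders, listener);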

  /**
   * Removes a listener for transaction modification events on the given table
   * in this conglomerate as previously set by the
   * 'addTransactionModificationListener' method.
   *
   * @param table_name the name of the table in the conglomerate to remove from
   *   the listener list.
   * @param listener the listener to be removed.
   */
  public void removeTransactionModificationListener(TableName table_name,
                                   TransactionModificationListener listener) {
    synchronized (modification_listeners) {
      ArrayList list = (ArrayList) modification_listeners.get(table_name);
      if (list != null) {
        int sz = list.size();
        for (int i = sz - 1; i >= 0; --i) {
          if (list.get(i) == listener) {
            list.remove(i);
          }
        }
      }
    }
  }

  // ---------- Transactional management ----------

  /**
   * Starts a new transaction.  The Transaction object returned by this
   * method is used to read the contents of the database at the time
   * the transaction was started.  It is also used if any modifications are
   * required to be made.
   */
  public Transaction createTransaction() {
    long this_commit_id;
    ArrayList this_committed_tables = new ArrayList();

    // Don't let a commit happen while we are looking at this.
    synchronized (commit_lock) {

      this_commit_id = commit_id;
      StateResource[] committed_table_list = state_store.getVisibleList();
      for (int i = 0; i < committed_table_list.length; ++i) {
        this_committed_tables.add(
                      getMasterTable((int) committed_table_list[i].table_id));
      }

      // Create a set of IndexSet for all the tables in this transaction.
      int sz = this_committed_tables.size();
      ArrayList index_info = new ArrayList(sz);
      for (int i = 0; i < sz; ++i) {
        MasterTableDataSource mtable =
                       (MasterTableDataSource) this_committed_tables.get(i);
        index_info.add(mtable.createIndexSet());
      }

      // Create the transaction and record it in the open transactions list.
      Transaction t = new Transaction(this,
                        this_commit_id, this_committed_tables, index_info);
      open_transactions.addTransaction(t);
      return t;

    }

  }
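
  // Illustrative sketch (not part of the original source): the transaction
  // pattern used throughout this class, e.g. in updateSystemTableSchema()
  // and resetTableID() above.  'conglomerate' is assumed to be an open
  // TableDataConglomerate.
  //
  //   Transaction transaction = conglomerate.createTransaction();
  //   try {
  //     // ... read or modify tables through the transaction ...
  //     transaction.closeAndCommit();
  //   }
  //   catch (TransactionException e) {
  //     // closeAndCommit() failed, e.g. due to a conflicting concurrent commit
  //   }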

  /**
   * This is called to notify the conglomerate that the transaction has
   * closed.  This is always called from either the rollback or commit method
   * of the transaction object.
   * <p>
   * NOTE: This increments 'commit_id' and requires that the conglomerate is
   * commit locked.
   */
  private void closeTransaction(Transaction transaction) {
    boolean last_transaction = false;
    // Closing must happen under a commit lock.
    synchronized (commit_lock) {
      open_transactions.removeTransaction(transaction);
      // Increment the commit id.
      ++commit_id;
      // Was that the last transaction?
      last_transaction = open_transactions.count() == 0;
    }

    // If last transaction then schedule a clean up event.
    if (last_transaction) {
      try {
        cleanUpConglomerate();
      }
      catch (IOException e) {
        Debug().write(Lvl.ERROR, this, "Error cleaning up conglomerate");
        Debug().writeException(Lvl.ERROR, e);
      }
    }

  }

1442
1443
1444  /**
1445   * Closes and drops the MasterTableDataSource. This should only be called
1446   * from the clean up method (cleanUpConglomerate()).
1447   * <p>
1448   * Returns true if the drop succeeded. A drop may fail if, for example, the
1449   * roots of the table are locked.
1450   * <p>
1451   * Note that the table_file_name will be encoded with the table type. For
1452   * example, ":2mighty.koi"
1453   */

1454  private boolean closeAndDropTable(String JavaDoc table_file_name) throws IOException {
1455    // Find the table with this file name.
1456
for (int i = 0; i < table_list.size(); ++i) {
1457      MasterTableDataSource t = (MasterTableDataSource) table_list.get(i);
1458      String JavaDoc enc_fn = table_file_name.substring(2);
1459      if (t.getSourceIdent().equals(enc_fn)) {
1460        // Close and remove from the list.
1461
if (t.isRootLocked()) {
1462          // We can't drop a table that has roots locked..
1463
return false;
1464        }
1465
1466        // This drops if the table has been marked as being dropped.
1467
boolean b = t.drop();
1468        if (b) {
1469          table_list.remove(i);
1470        }
1471        return b;
1472      }
1473    }
1474    return false;
1475  }

  /**
   * Closes the MasterTableDataSource with the given source ident.  This should
   * only be called from the clean up method (cleanUpConglomerate()).
   * <p>
   * Note that the table_file_name will be encoded with the table type.  For
   * example, ":2mighty.koi"
   */
  private void closeTable(String table_file_name, boolean pending_drop)
                                                          throws IOException {
    // Find the table with this file name.
    for (int i = 0; i < table_list.size(); ++i) {
      MasterTableDataSource t = (MasterTableDataSource) table_list.get(i);
      String enc_fn = table_file_name.substring(2);
      if (t.getSourceIdent().equals(enc_fn)) {
        // Close and remove from the list.
        if (t.isRootLocked()) {
          // We can't drop a table that has roots locked..
          return;
        }

        // This closes the table
        t.dispose(pending_drop);
        return;
      }
    }
    return;
  }
1504  
1505  /**
1506   * Cleans up the conglomerate by deleting all tables marked as deleted.
1507   * This should be called when the conglomerate is opened or shut down, and
1508   * when there are no transactions open.
1509   */

1510  private void cleanUpConglomerate() throws IOException {
1511    synchronized (commit_lock) {
1512      if (isClosed()) {
1513        return;
1514      }
1515
1516      // If no open transactions on the database, then clean up.
1517      if (open_transactions.count() == 0) {
1518        
1519        StateResource[] delete_list = state_store.getDeleteList();
1520        if (delete_list.length > 0) {
1521          int drop_count = 0;
1522          
1523          for (int i = delete_list.length - 1; i >= 0; --i) {
1524            String fn = (String) delete_list[i].name;
1525            closeTable(fn, true);
1526          }
1527
1528// // NASTY HACK: The native win32 file mapping will not
1529// // let you delete a file that is mapped. The NIO API does not allow
1530// // you to manually unmap a file, and the only way to unmap
1531// // memory under win32 is to wait for the garbage collector to
1532// // free it. So this is a hack to try and make the engine
1533// // unmap the memory mapped buffer.
1534// //
1535// // This is not a problem under Unix/Linux because the OS has no
1536// // difficulty deleting a file that is mapped.
1537//
1538// System.gc();
1539// try {
1540// Thread.sleep(5);
1541// }
1542// catch (InterruptedException e) { /* ignore */ }
1543
1544          for (int i = delete_list.length - 1; i >= 0; --i) {
1545            String fn = (String) delete_list[i].name;
1546            boolean dropped = closeAndDropTable(fn);
1547            // If we managed to drop the table, remove from the list.
1548            if (dropped) {
1549              state_store.removeDeleteResource(fn);
1550              ++drop_count;
1551            }
1552          }
1553
1554          // If we dropped a table, commit an update to the conglomerate state.
1555          if (drop_count > 0) {
1556            state_store.commit();
1557          }
1558        }
1559
1560      }
1561    }
1562  }
1563
1564  // ---------- Detection of constraint violations ----------
1565

1566  /**
1567   * A variable resolver for a single row of a table source. Used when
1568   * evaluating a check constraint for a newly added row.
1569   */

1570  private static class TableRowVariableResolver implements VariableResolver {
1571
1572    private TableDataSource table;
1573    private int row_index = -1;
1574
1575    public TableRowVariableResolver(TableDataSource table, int row) {
1576      this.table = table;
1577      this.row_index = row;
1578    }
1579
1580    private int findColumnName(Variable variable) {
1581      int col_index = table.getDataTableDef().findColumnName(
1582                                                        variable.getName());
1583      if (col_index == -1) {
1584        throw new Error("Can't find column: " + variable);
1585      }
1586      return col_index;
1587    }
1588
1589    // --- Implemented ---
1590

1591    public int setID() {
1592      return row_index;
1593    }
1594
1595    public TObject resolve(Variable variable) {
1596      int col_index = findColumnName(variable);
1597      return table.getCellContents(col_index, row_index);
1598    }
1599
1600    public TType returnTType(Variable variable) {
1601      int col_index = findColumnName(variable);
1602      return table.getDataTableDef().columnAt(col_index).getTType();
1603    }
1604
1605  }
1606
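  // Illustrative sketch (editorial, not in the original source): further down,
  // checkAddConstraintViolations binds one row through this resolver so a
  // CHECK expression can be evaluated against it, roughly as follows:
  //
  //   TableRowVariableResolver resolver =
  //                 new TableRowVariableResolver(table, row_indices[rn]);
  //   TObject ob = exp.evaluate(null, resolver, context);
  //   Boolean b = ob.toBoolean();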
1607  /**
1608   * Convenience, converts a String[] array to a comma delimited string
1609   * list.
1610   */

1611  static String stringColumnList(String[] list) {
1612    StringBuffer buf = new StringBuffer();
1613    for (int i = 0; i < list.length - 1; ++i) {
1614      buf.append(list[i]).append(", ");
1615    }
1616    buf.append(list[list.length - 1]);
1617    return new String(buf);
1618  }
1619
1620  /**
1621   * Convenience, returns either 'Immediate' or 'Deferred' dependent on the
1622   * deferred short.
1623   */

1624  static String deferredString(short deferred) {
1625    switch(deferred) {
1626      case(Transaction.INITIALLY_IMMEDIATE):
1627        return "Immediate";
1628      case(Transaction.INITIALLY_DEFERRED):
1629        return "Deferred";
1630      default:
1631        throw new Error("Unknown deferred string.");
1632    }
1633  }
1634
1635  /**
1636   * Returns a list of column indices into the given DataTableDef for the
1637   * given column names.
1638   */

1639  static int[] findColumnIndices(DataTableDef table_def, String[] cols) {
1640    // Resolve the list of column names to column indexes
1641    int[] col_indexes = new int[cols.length];
1642    for (int i = 0; i < cols.length; ++i) {
1643      col_indexes[i] = table_def.findColumnName(cols[i]);
1644    }
1645    return col_indexes;
1646  }
1647
1648  /**
1649   * Checks the uniqueness of the columns in the row of the table. If
1650   * the given column information in the row data is not unique then it
1651   * returns false. We also check for NULL values - a PRIMARY KEY constraint
1652   * does not allow NULL values, whereas a UNIQUE constraint does.
1653   */

1654  private static boolean isUniqueColumns(
1655                       TableDataSource table, int rindex, String[] cols,
1656                       boolean nulls_are_allowed) {
1657
1658    DataTableDef table_def = table.getDataTableDef();
1659    // 'identical_rows' keeps a tally of the rows that match our added cell.
1660    IntegerVector identical_rows = null;
1661
1662    // Resolve the list of column names to column indexes
1663    int[] col_indexes = findColumnIndices(table_def, cols);
1664
1665    // If the value being tested for uniqueness contains NULL, we return true
1666    // if nulls are allowed.
1667    for (int i = 0; i < col_indexes.length; ++i) {
1668      TObject cell = table.getCellContents(col_indexes[i], rindex);
1669      if (cell.isNull()) {
1670        return nulls_are_allowed;
1671      }
1672    }
1673    
1674    
1675    for (int i = 0; i < col_indexes.length; ++i) {
1676
1677      int col_index = col_indexes[i];
1678
1679      // Get the column definition and the cell being inserted,
1680      // DataTableColumnDef column_def = table_def.columnAt(col_index);
1681      TObject cell = table.getCellContents(col_index, rindex);
1682
1683      // We are assured of uniqueness if 'identical_rows != null &&
1684      // identical_rows.size() == 0' This is because 'identical_rows' keeps
1685      // a running tally of the rows in the table that contain unique columns
1686      // whose cells match the record being added.
1687
1688      if (identical_rows == null || identical_rows.size() > 0) {
1689
1690        // Ask SelectableScheme to return pointers to row(s) if there is
1691        // already a cell identical to this in the table.
1692
1693        SelectableScheme ss = table.getColumnScheme(col_index);
1694        IntegerVector ivec = ss.selectEqual(cell);
1695
1696        // If 'identical_rows' hasn't been set up yet then set it to 'ivec'
1697        // (the list of rows where there is a cell which is equal to the one
1698        // being added)
1699        // If 'identical_rows' has been set up, then perform an
1700        // 'intersection' operation on the two lists (only keep the numbers
1701        // that are repeated in both lists). Therefore we keep the rows
1702        // that match the row being added.
1703
1704        if (identical_rows == null) {
1705          identical_rows = ivec;
1706        }
1707        else {
1708          ivec.quickSort();
1709          int row_index = identical_rows.size() - 1;
1710          while (row_index >= 0) {
1711            int val = identical_rows.intAt(row_index);
1712            int found_index = ivec.sortedIndexOf(val);
1713            // If we _didn't_ find the index in the array
1714            if (found_index >= ivec.size() ||
1715                ivec.intAt(found_index) != val) {
1716              identical_rows.removeIntAt(row_index);
1717            }
1718            --row_index;
1719          }
1720        }
1721
1722      }
1723
1724    } // for each column
1725

1726    // If there is 1 (the row we added) then we are unique, otherwise we are
1727    // not.
1728    if (identical_rows != null) {
1729      int sz = identical_rows.size();
1730      if (sz == 1) {
1731        return true;
1732      }
1733      if (sz > 1) {
1734        return false;
1735      }
1736      else if (sz == 0) {
1737        throw new Error("Assertion failed: We must be able to find the " +
1738                        "row we are testing uniqueness against!");
1739      }
1740    }
1741    return true;
1742
1743  }
1744
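  // Illustrative sketch (editorial, not in the original source): the
  // 'nulls_are_allowed' flag is what separates the two callers below - a
  // UNIQUE constraint accepts a row holding NULL in a constrained column,
  // while a PRIMARY KEY does not:
  //
  //   isUniqueColumns(table, row_index, unique.columns, true);        // UNIQUE
  //   isUniqueColumns(table, row_index, primary_key.columns, false);  // PRIMARY KEY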
1745
1746  /**
1747   * Returns the indices of the rows in the given table that hold the given
1748   * key. The key columns are given by 'col2_indexes' and the key values by
1749   * 'key_value'. This can be used to count the number of keys found in a
1750   * table for constraint violation checking.
1751   */

1752  static IntegerVector findKeys(TableDataSource t2, int[] col2_indexes,
1753                                TObject[] key_value) {
1754
1755    int key_size = key_value.length;
1756    // Now query table 2 to determine if the key values are present.
1757    // Use index scan on first key.
1758    SelectableScheme ss = t2.getColumnScheme(col2_indexes[0]);
1759    IntegerVector list = ss.selectEqual(key_value[0]);
1760    if (key_size > 1) {
1761      // Full scan for the rest of the columns
1762      int sz = list.size();
1763      // For each element of the list
1764      for (int i = sz - 1; i >= 0; --i) {
1765        int r_index = list.intAt(i);
1766        // For each key in the column list
1767        for (int c = 1; c < key_size; ++c) {
1768          int col_index = col2_indexes[c];
1769          TObject c_value = key_value[c];
1770          if (c_value.compareTo(t2.getCellContents(col_index, r_index)) != 0) {
1771            // If any values in the key are not equal set this flag to false
1772            // and remove the index from the list.
1773            list.removeIntAt(i);
1774            // Break the for loop
1775            break;
1776          }
1777        }
1778      }
1779    }
1780
1781    return list;
1782  }
1783
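  // Illustrative sketch (editorial, not in the original source): the foreign
  // key checks below only need the number of matching rows, so the result is
  // usually reduced straight to a count:
  //
  //   int matches = findKeys(t2, col2_indexes, key_value).size();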
1784  /**
1785   * Finds the number of rows in table2 whose 'cols2' values match the
1786   * 'cols1' values of the given row in table1. This method is used to
1787   * determine if there are referential links.
1788   * <p>
1789   * If this method returns -1 it means the value being searched for is NULL
1790   * therefore we can't determine if there are any referenced links.
1791   * <p>
1792   * HACK: If 'check_source_table_key' is set then the key is checked for in
1793   * the source table and if it exists returns 0. Otherwise it looks for
1794   * references to the key in table2.
1795   */

1796  private static int rowCountOfReferenceTable(
1797                 SimpleTransaction transaction,
1798                 int row_index, TableName table1, String[] cols1,
1799                                TableName table2, String[] cols2,
1800                                boolean check_source_table_key) {
1801
1802    // Get the tables
1803    TableDataSource t1 = transaction.getTableDataSource(table1);
1804    TableDataSource t2 = transaction.getTableDataSource(table2);
1805    // The table defs
1806    DataTableDef dtd1 = t1.getDataTableDef();
1807    DataTableDef dtd2 = t2.getDataTableDef();
1808    // Resolve the list of column names to column indexes
1809    int[] col1_indexes = findColumnIndices(dtd1, cols1);
1810    int[] col2_indexes = findColumnIndices(dtd2, cols2);
1811
1812    int key_size = col1_indexes.length;
1813    // Get the data from table1
1814    TObject[] key_value = new TObject[key_size];
1815    int null_count = 0;
1816    for (int n = 0; n < key_size; ++n) {
1817      key_value[n] = t1.getCellContents(col1_indexes[n], row_index);
1818      if (key_value[n].isNull()) {
1819        ++null_count;
1820      }
1821    }
1822
1823    // If we are searching for null then return -1;
1824    if (null_count > 0) {
1825      return -1;
1826    }
1827
1828    // HACK: This is a hack. The purpose is if the key exists in the source
1829    // table we return 0 indicating to the delete check that there are no
1830    // references and it's valid. To the semantics of the method this is
1831    // incorrect.
1832    if (check_source_table_key) {
1833      IntegerVector keys = findKeys(t1, col1_indexes, key_value);
1834      int key_count = keys.size();
1835      if (key_count > 0) {
1836        return 0;
1837      }
1838    }
1839
1840    return findKeys(t2, col2_indexes, key_value).size();
1841  }
1842
1843
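  // Illustrative sketch (editorial, not in the original source): for an insert
  // the key table and the referenced table are passed in declaration order,
  // while the delete check in checkRemoveConstraintViolations swaps them and
  // sets the 'check_source_table_key' hack flag:
  //
  //   rowCountOfReferenceTable(transaction, row,
  //                            ref.key_table_name, ref.key_columns,
  //                            ref.ref_table_name, ref.ref_columns, false);
  //   rowCountOfReferenceTable(transaction, row,
  //                            ref.ref_table_name, ref.ref_columns,
  //                            ref.key_table_name, ref.key_columns, true);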
1844  /**
1845   * Checks that the nullability and class of the fields in the given
1846   * rows are valid. Should be used as part of the insert procedure.
1847   */

1848  static void checkFieldConstraintViolations(
1849                                  SimpleTransaction transaction,
1850                                  TableDataSource table, int[] row_indices) {
1851
1852    // Quick exit case
1853    if (row_indices == null || row_indices.length == 0) {
1854      return;
1855    }
1856
1857    // Check for any bad cells - which are either cells that are 'null' in a
1858    // column declared as 'not null', or duplicated in a column declared as
1859    // unique.
1860
1861    DataTableDef table_def = table.getDataTableDef();
1862    TableName table_name = table_def.getTableName();
1863
1864    // Check not-null columns are not null. If they are null, throw an
1865    // error. Additionally check that JAVA_OBJECT columns are correctly
1866    // typed.
1867
1868    // Check each field of the added rows
1869    int len = table_def.columnCount();
1870    for (int i = 0; i < len; ++i) {
1871
1872      // Get the column definition and the cell being inserted,
1873      DataTableColumnDef column_def = table_def.columnAt(i);
1874      // For each row added to this column
1875      for (int rn = 0; rn < row_indices.length; ++rn) {
1876        TObject cell = table.getCellContents(i, row_indices[rn]);
1877
1878        // Check: Column defined as not null and cell being inserted is
1879        // not null.
1880        if (column_def.isNotNull() && cell.isNull()) {
1881          throw new DatabaseConstraintViolationException(
1882              DatabaseConstraintViolationException.NULLABLE_VIOLATION,
1883              "You tried to add 'null' cell to column '" +
1884              table_def.columnAt(i).getName() +
1885              "' which is declared as 'not_null'");
1886        }
1887
1888        // Check: If column is a java object, then deserialize and check the
1889        // object is an instance of the class constraint,
1890        if (!cell.isNull() &&
1891            column_def.getSQLType() ==
1892                           com.mckoi.database.global.SQLTypes.JAVA_OBJECT) {
1893          String class_constraint = column_def.getClassConstraint();
1894          // Everything is derived from java.lang.Object so this optimization
1895          // will not cause an object deserialization.
1896          if (!class_constraint.equals("java.lang.Object")) {
1897            // Get the binary representation of the java object
1898            ByteLongObject serialized_jobject =
1899                                           (ByteLongObject) cell.getObject();
1900            // Deserialize the object
1901            Object ob = ObjectTranslator.deserialize(serialized_jobject);
1902            // Check it's assignable from the constraining class
1903            if (!ob.getClass().isAssignableFrom(
1904                        column_def.getClassConstraintAsClass())) {
1905              throw new DatabaseConstraintViolationException(
1906                DatabaseConstraintViolationException.JAVA_TYPE_VIOLATION,
1907                "The Java object being inserted is not derived from the " +
1908                "class constraint defined for the column (" +
1909                class_constraint + ")");
1910            }
1911          }
1912        }
1913
1914      } // For each row being added
1915

1916    } // for each column
1917

1918  }
1919
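  // Illustrative sketch (editorial, not in the original source): a caller that
  // has just inserted rows would typically run the field checks over exactly
  // those rows, e.g.
  //
  //   checkFieldConstraintViolations(transaction, table, new int[] { row_index });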
1920  /**
1921   * Performs constraint violation checks on an addition of the given set of
1922   * row indices into the TableDataSource in the given transaction. If a
1923   * violation is detected a DatabaseConstraintViolationException is thrown.
1924   * <p>
1925   * If deferred = IMMEDIATE only immediate constraints are tested. If
1926   * deferred = DEFERRED all constraints are tested.
1927   *
1928   * @param transaction the Transaction instance used to determine table
1929   * constraints.
1930   * @param table the table to test
1931   * @param row_indices the list of rows that were added to the table.
1932   * @param deferred '1' indicates Transaction.IMMEDIATE,
1933   * '2' indicates Transaction.DEFERRED.
1934   */

1935  static void checkAddConstraintViolations(
1936           SimpleTransaction transaction,
1937           TableDataSource table, int[] row_indices, short deferred) {
1938
1939    String cur_schema = table.getDataTableDef().getSchema();
1940    QueryContext context = new SystemQueryContext(transaction, cur_schema);
1941
1942    // Quick exit case
1943    if (row_indices == null || row_indices.length == 0) {
1944      return;
1945    }
1946
1947    DataTableDef table_def = table.getDataTableDef();
1948    TableName table_name = table_def.getTableName();
1949
1950    // ---- Constraint checking ----
1951

1952    // Check any primary key constraint.
1953    Transaction.ColumnGroup primary_key =
1954               Transaction.queryTablePrimaryKeyGroup(transaction, table_name);
1955    if (primary_key != null &&
1956        (deferred == Transaction.INITIALLY_DEFERRED ||
1957         primary_key.deferred == Transaction.INITIALLY_IMMEDIATE)) {
1958
1959      // For each row added to this column
1960      for (int rn = 0; rn < row_indices.length; ++rn) {
1961        if (!isUniqueColumns(table, row_indices[rn],
1962                             primary_key.columns, false)) {
1963          throw new DatabaseConstraintViolationException(
1964            DatabaseConstraintViolationException.PRIMARY_KEY_VIOLATION,
1965            deferredString(deferred) + " primary Key constraint violation (" +
1966            primary_key.name + ") Columns = ( " +
1967            stringColumnList(primary_key.columns) +
1968            " ) Table = ( " + table_name.toString() + " )");
1969        }
1970      } // For each row being added
1971

1972    }
1973
1974    // Check any unique constraints.
1975    Transaction.ColumnGroup[] unique_constraints =
1976                  Transaction.queryTableUniqueGroups(transaction, table_name);
1977    for (int i = 0; i < unique_constraints.length; ++i) {
1978      Transaction.ColumnGroup unique = unique_constraints[i];
1979      if (deferred == Transaction.INITIALLY_DEFERRED ||
1980          unique.deferred == Transaction.INITIALLY_IMMEDIATE) {
1981
1982        // For each row added to this column
1983        for (int rn = 0; rn < row_indices.length; ++rn) {
1984          if (!isUniqueColumns(table, row_indices[rn], unique.columns, true)) {
1985            throw new DatabaseConstraintViolationException(
1986              DatabaseConstraintViolationException.UNIQUE_VIOLATION,
1987              deferredString(deferred) + " unique constraint violation (" +
1988              unique.name + ") Columns = ( " +
1989              stringColumnList(unique.columns) + " ) Table = ( " +
1990              table_name.toString() + " )");
1991          }
1992        } // For each row being added
1993

1994      }
1995    }
1996
1997    // Check any foreign key constraints.
1998    // This ensures all foreign references in the table are referenced
1999    // to valid records.
2000    Transaction.ColumnGroupReference[] foreign_constraints =
2001          Transaction.queryTableForeignKeyReferences(transaction, table_name);
2002    for (int i = 0; i < foreign_constraints.length; ++i) {
2003      Transaction.ColumnGroupReference ref = foreign_constraints[i];
2004      if (deferred == Transaction.INITIALLY_DEFERRED ||
2005          ref.deferred == Transaction.INITIALLY_IMMEDIATE) {
2006        // For each row added to this column
2007        for (int rn = 0; rn < row_indices.length; ++rn) {
2008          // Make sure the referenced record exists
2009

2010          // Return the count of records where the given row of
2011          // table_name(columns, ...) IN
2012          // ref_table_name(ref_columns, ...)
2013          int row_count = rowCountOfReferenceTable(transaction,
2014                                     row_indices[rn],
2015                                     ref.key_table_name, ref.key_columns,
2016                                     ref.ref_table_name, ref.ref_columns,
2017                                     false);
2018          if (row_count == -1) {
2019            // foreign key is NULL
2020          }
2021          if (row_count == 0) {
2022            throw new DatabaseConstraintViolationException(
2023              DatabaseConstraintViolationException.FOREIGN_KEY_VIOLATION,
2024              deferredString(deferred)+" foreign key constraint violation (" +
2025              ref.name + ") Columns = " +
2026              ref.key_table_name.toString() + "( " +
2027              stringColumnList(ref.key_columns) + " ) -> " +
2028              ref.ref_table_name.toString() + "( " +
2029              stringColumnList(ref.ref_columns) + " )");
2030          }
2031        } // For each row being added.
2032      }
2033    }
2034
2035    // Any general checks of the inserted data
2036    Transaction.CheckExpression[] check_constraints =
2037               Transaction.queryTableCheckExpressions(transaction, table_name);
2038
2039    // The TransactionSystem object
2040    TransactionSystem system = transaction.getSystem();
2041
2042    // For each check constraint, check that it evaluates to true.
2043    for (int i = 0; i < check_constraints.length; ++i) {
2044      Transaction.CheckExpression check = check_constraints[i];
2045      if (deferred == Transaction.INITIALLY_DEFERRED ||
2046          check.deferred == Transaction.INITIALLY_IMMEDIATE) {
2047
2048        check = system.prepareTransactionCheckConstraint(table_def, check);
2049        Expression exp = check.expression;
2050
2051        // For each row being added to this column
2052        for (int rn = 0; rn < row_indices.length; ++rn) {
2053          TableRowVariableResolver resolver =
2054                        new TableRowVariableResolver(table, row_indices[rn]);
2055          TObject ob = exp.evaluate(null, resolver, context);
2056          Boolean b = ob.toBoolean();
2057
2058          if (b != null) {
2059            if (b.equals(Boolean.FALSE)) {
2060              // Evaluated to false so don't allow this row to be added.
2061              throw new DatabaseConstraintViolationException(
2062                 DatabaseConstraintViolationException.CHECK_VIOLATION,
2063                 deferredString(deferred) + " check constraint violation (" +
2064                 check.name + ") - '" + exp.text() +
2065                 "' evaluated to false for inserted/updated row.");
2066            }
2067          }
2068          else {
2069            // NOTE: This error will pass the row by default
2070            transaction.Debug().write(Lvl.ERROR,
2071              TableDataConglomerate.class,
2072              deferredString(deferred) + " check constraint violation (" +
2073              check.name + ") - '" + exp.text() +
2074              "' returned a non boolean or NULL result.");
2075          }
2076        } // For each row being added
2077

2078      }
2079    }
2080
2081
2082
2083  }
2084
2085  /**
2086   * Performs constraint violation checks on an addition of the given
2087   * row index into the TableDataSource in the given transaction. If a
2088   * violation is detected a DatabaseConstraintViolationException is thrown.
2089   * <p>
2090   * If deferred = IMMEDIATE only immediate constraints are tested. If
2091   * deferred = DEFERRED all constraints are tested.
2092   *
2093   * @param transaction the Transaction instance used to determine table
2094   * constraints.
2095   * @param table the table to test
2096   * @param row_index the row that was added to the table.
2097   * @param deferred '1' indicates Transaction.IMMEDIATE,
2098   * '2' indicates Transaction.DEFERRED.
2099   */

2100  static void checkAddConstraintViolations(
2101             SimpleTransaction transaction,
2102             TableDataSource table, int row_index, short deferred) {
2103    checkAddConstraintViolations(transaction, table,
2104                                 new int[] { row_index }, deferred);
2105  }
2106
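  // Illustrative sketch (editorial, not in the original source): the
  // single-row overload simply wraps the row in an array, so an immediate
  // check after one insert might look like:
  //
  //   checkAddConstraintViolations(transaction, table, row_index,
  //                                Transaction.INITIALLY_IMMEDIATE);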
2107  /**
2108   * Performs constraint violation checks on a removal of the given set of
2109   * row indexes from the TableDataSource in the given transaction. If a
2110   * violation is detected a DatabaseConstraintViolationException is thrown.
2111   * <p>
2112   * If deferred = IMMEDIATE only immediate constraints are tested. If
2113   * deferred = DEFERRED all constraints are tested.
2114   *
2115   * @param transaction the Transaction instance used to determine table
2116   * constraints.
2117   * @param table the table to test
2118   * @param row_indices the set of rows that were removed from the table.
2119   * @param deferred '1' indicates Transaction.IMMEDIATE,
2120   * '2' indicates Transaction.DEFERRED.
2121   */

2122  static void checkRemoveConstraintViolations(
2123           SimpleTransaction transaction, TableDataSource table,
2124           int[] row_indices, short deferred) {
2125
2126    // Quick exit case
2127    if (row_indices == null || row_indices.length == 0) {
2128      return;
2129    }
2130
2131    DataTableDef table_def = table.getDataTableDef();
2132    TableName table_name = table_def.getTableName();
2133
2134    // Check any imported foreign key constraints.
2135    // This ensures that a referential reference can not be removed making
2136    // it invalid.
2137    Transaction.ColumnGroupReference[] foreign_constraints =
2138           Transaction.queryTableImportedForeignKeyReferences(
2139                                                     transaction, table_name);
2140    for (int i = 0; i < foreign_constraints.length; ++i) {
2141      Transaction.ColumnGroupReference ref = foreign_constraints[i];
2142      if (deferred == Transaction.INITIALLY_DEFERRED ||
2143          ref.deferred == Transaction.INITIALLY_IMMEDIATE) {
2144        // For each row removed from this column
2145        for (int rn = 0; rn < row_indices.length; ++rn) {
2146          // Make sure the referenced record exists
2147

2148          // Return the count of records where the given row of
2149          // ref_table_name(columns, ...) IN
2150          // table_name(ref_columns, ...)
2151          int row_count = rowCountOfReferenceTable(transaction,
2152                                     row_indices[rn],
2153                                     ref.ref_table_name, ref.ref_columns,
2154                                     ref.key_table_name, ref.key_columns,
2155                                     true);
2156          // There must be 0 references otherwise the delete isn't allowed to
2157          // happen.
2158          if (row_count > 0) {
2159            throw new DatabaseConstraintViolationException(
2160              DatabaseConstraintViolationException.FOREIGN_KEY_VIOLATION,
2161              deferredString(deferred)+" foreign key constraint violation " +
2162              "on delete (" +
2163              ref.name + ") Columns = " +
2164              ref.key_table_name.toString() + "( " +
2165              stringColumnList(ref.key_columns) + " ) -> " +
2166              ref.ref_table_name.toString() + "( " +
2167              stringColumnList(ref.ref_columns) + " )");
2168          }
2169        } // For each row being added.
2170      }
2171    }
2172
2173  }
2174
2175  /**
2176   * Performs constraint violation checks on a removal of the given
2177   * row index from the TableDataSource in the given transaction. If a
2178   * violation is detected a DatabaseConstraintViolationException is thrown.
2179   * <p>
2180   * If deferred = IMMEDIATE only immediate constraints are tested. If
2181   * deferred = DEFERRED all constraints are tested.
2182   *
2183   * @param transaction the Transaction instance used to determine table
2184   * constraints.
2185   * @param table the table to test
2186   * @param row_index the row that was removed from the table.
2187   * @param deferred '1' indicates Transaction.IMMEDIATE,
2188   * '2' indicates Transaction.DEFERRED.
2189   */

2190  static void checkRemoveConstraintViolations(
2191              SimpleTransaction transaction,
2192              TableDataSource table, int row_index, short deferred) {
2193    checkRemoveConstraintViolations(transaction, table,
2194                                    new int[] { row_index }, deferred);
2195  }
2196
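  // Illustrative sketch (editorial, not in the original source): the matching
  // delete-side check for a single removed row:
  //
  //   checkRemoveConstraintViolations(transaction, table, row_index,
  //                                   Transaction.INITIALLY_IMMEDIATE);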
2197  /**
2198   * Performs constraint violation checks on all the rows in the given
2199   * table. If a violation is detected a DatabaseConstraintViolationException
2200   * is thrown.
2201   * <p>
2202   * This method is useful when the constraint schema of a table changes and
2203   * we need to check existing data in a table is conformant with the new
2204   * constraint changes.
2205   * <p>
2206   * If deferred = IMMEDIATE only immediate constraints are tested. If
2207   * deferred = DEFERRED all constraints are tested.
2208   */

2209  static void checkAllAddConstraintViolations(
2210               SimpleTransaction transaction, TableDataSource table,
2211               short deferred) {
2212    // Get all the rows in the table
2213    int[] rows = new int[table.getRowCount()];
2214    RowEnumeration row_enum = table.rowEnumeration();
2215    int p = 0;
2216    while (row_enum.hasMoreRows()) {
2217      rows[p] = row_enum.nextRowIndex();
2218      ++p;
2219    }
2220    // Check the constraints of all the rows in the table.
2221    checkAddConstraintViolations(transaction, table,
2222                                 rows, Transaction.INITIALLY_DEFERRED);
2223  }
2224
2225
2226  // ---------- Blob store and object management ----------
2227

2228  /**
2229   * Creates and allocates storage for a new large object in the blob store.
2230   * This is called to create a new large object before filling it with data
2231   * sent from the client.
2232   */

2233  Ref createNewLargeObject(byte type, long size) {
2234    try {
2235      // If the conglomerate is read-only, a blob can not be created.
2236      if (isReadOnly()) {
2237        throw new RuntimeException(
2238            "A new large object can not be allocated " +
2239            "with a read-only conglomerate");
2240      }
2241      // Allocate the large object from the store
2242      Ref ref = blob_store.allocateLargeObject(type, size);
2243      // Return the large object reference
2244      return ref;
2245    }
2246    catch (IOException e) {
2247      Debug().writeException(e);
2248      throw new RuntimeException("IO Error when creating blob: " +
2249                                 e.getMessage());
2250    }
2251  }
2252  
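  // Illustrative sketch (editorial, not in the original source; 'blob_type'
  // and 'blob_size' are hypothetical values supplied by the client): space for
  // a large object is reserved first, then data is written into the returned
  // reference before the blob is used in a row:
  //
  //   Ref ref = createNewLargeObject(blob_type, blob_size);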
2253  /**
2254   * Called when one or more blobs have been completed. This flushes the blob
2255   * to the blob store and completes the blob write procedure. It's important
2256   * this is called otherwise the BlobStore may not be correctly flushed to
2257   * disk with the changes and the data will not be recoverable if a crash
2258   * occurs.
2259   */

2260  void flushBlobStore() {
2261    // NOTE: no longer necessary - please deprecate
2262  }
2263 
2264
2265  // ---------- Conglomerate diagnosis and repair methods ----------
2266

2267  /**
2268   * Checks the conglomerate state file and attempts to repair it. Any
2269   * problems found are reported to the given UserTerminal.
2270   */

2271  public void fix(String name, UserTerminal terminal) {
2272    this.name = name;
2273
2274    try {
2275
2276      String state_fn = (name + STATE_POST);
2277      boolean state_exists = false;
2278      try {
2279        state_exists = exists(name);
2280      }
2281      catch (IOException e) {
2282        terminal.println("IO Error when checking if state store exists: " +
2283                         e.getMessage());
2284        e.printStackTrace();
2285      }
2286
2287      if (!state_exists) {
2288        terminal.println("Couldn't find store: " + state_fn);
2289        return;
2290      }
2291      terminal.println("+ Found state store: " + state_fn);
2292      
2293      // Open the state store
2294      try {
2295        act_state_store = storeSystem().openStore(name + STATE_POST);
2296        state_store = new StateStore(act_state_store);
2297        // Get the 64 byte fixed area
2298        Area fixed_area = act_state_store.getArea(-1);
2299        long head_p = fixed_area.getLong();
2300        state_store.init(head_p);
2301        terminal.println("+ Initialized the state store: " + state_fn);
2302      }
2303      catch (IOException e) {
2304        // Couldn't initialize the state file.
2305        terminal.println("Couldn't initialize the state file: " + state_fn +
2306                         " Reason: " + e.getMessage());
2307        return;
2308      }
2309
2310      // Initialize the blob store
2311      try {
2312        initializeBlobStore();
2313      }
2314      catch (IOException e) {
2315        terminal.println("Error initializing BlobStore: " + e.getMessage());
2316        e.printStackTrace();
2317        return;
2318      }
2319      // Setup internal
2320      setupInternal();
2321
2322      try {
2323        checkVisibleTables(terminal);
2324
2325        // Reset the sequence id's for the system tables
2326        terminal.println("+ RESETTING ALL SYSTEM TABLE UNIQUE ID VALUES.");
2327        resetAllSystemTableID();
2328      
2329        // Some diagnostic information
2330        StringBuffer buf = new StringBuffer();
2331        MasterTableDataSource t;
2332        StateResource[] committed_tables = state_store.getVisibleList();
2333        StateResource[] committed_dropped = state_store.getDeleteList();
2334        for (int i = 0; i < committed_tables.length; ++i) {
2335          terminal.println("+ COMMITTED TABLE: " +
2336                           committed_tables[i].name);
2337        }
2338        for (int i = 0; i < committed_dropped.length; ++i) {
2339          terminal.println("+ COMMIT DROPPED TABLE: " +
2340                           committed_dropped[i].name);
2341        }
2342
2343        return;
2344
2345      }
2346      catch (IOException e) {
2347        terminal.println("IOException: " + e.getMessage());
2348        e.printStackTrace();
2349      }
2350
2351    }
2352    finally {
2353      try {
2354        close();
2355      }
2356      catch (IOException e) {
2357        terminal.println("Unable to close conglomerate after fix.");
2358      }
2359    }
2360
2361  }
2362
2363
2364  // ---------- Conveniences for commit ----------
2365

2366  /**
2367   * A static container class for information collected about a table during
2368   * the commit cycle.
2369   */

2370  private static class CommitTableInfo {
2371    // The master table
2372    MasterTableDataSource master;
2373    // The immutable index set
2374    IndexSet index_set;
2375    // The journal describing the changes to this table by this
2376    // transaction.
2377    MasterTableJournal journal;
2378    // A list of journals describing changes since this transaction
2379    // started.
2380    MasterTableJournal[] changes_since_commit;
2381    // Break down of changes to the table
2382    // Normalized list of row ids that were added
2383    int[] norm_added_rows;
2384    // Normalized list of row ids that were removed
2385    int[] norm_removed_rows;
2386  }
2387
2388  /**
2389   * Returns true if the given List of 'CommitTableInfo' objects contains an
2390   * entry for the given master table.
2391   */

2392  private static boolean commitTableListContains(List list,
2393                                                 MasterTableDataSource master) {
2394    int sz = list.size();
2395    for (int i = 0; i < sz; ++i) {
2396      CommitTableInfo info = (CommitTableInfo) list.get(i);
2397      if (info.master.equals(master)) {
2398        return true;
2399      }
2400    }
2401    return false;
2402  }
2403  
2404  
2405
2406
2407  // ---------- low level File IO level operations on a conglomerate ----------
2408  // These operations are low level IO operations on the contents of the
2409  // conglomerate. How the rows and tables are organised is up to the
2410  // transaction management. These methods deal with the low level
2411  // operations of creating/dropping tables and adding, deleting and querying
2412  // rows in tables.
2413
2414  /**
2415   * Tries to commit a transaction to the conglomerate. This is called
2416   * by the 'closeAndCommit' method in Transaction. An overview of how this
2417   * works follows:
2418   * <ul>
2419   * <li> Determine if any transactions have been committed since this
2420   * transaction was created.
2421   * <li> If no transactions committed then commit this transaction and exit.
2422   * <li> Otherwise, determine the tables that have been changed by the
2423   * committed transactions since this was created.
2424   * <li> If none of those changed tables overlap with the tables changed by
2425   * this transaction then commit this transaction and exit.
2426   * <li> Determine if there are any rows that have been deleted that this
2427   * transaction read/deleted.
2428   * <li> If there are then rollback this transaction and throw an error.
2429   * <li> Determine if any rows have been added to the tables this transaction
2430   * read/changed.
2431   * <li> If there are then rollback this transaction and throw an error.
2432   * <li> Otherwise commit the transaction.
2433   * </ul>
2434   *
2435   * @param transaction the transaction to commit from.
2436   * @param visible_tables the list of visible tables at the end of the commit
2437   * (MasterTableDataSource)
2438   * @param selected_from_tables the list of tables that this transaction
2439   * performed 'select' like queries on (MasterTableDataSource)
2440   * @param touched_tables the list of tables touched by the transaction
2441   * (MutableTableDataSource)
2442   * @param journal the journal that describes all the changes within the
2443   * transaction.
2444   */

2445  void processCommit(Transaction transaction, ArrayList visible_tables,
2446                     ArrayList selected_from_tables,
2447                     ArrayList touched_tables, TransactionJournal journal)
2448                                                 throws TransactionException {
2449
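    // Illustrative note (editorial, not in the original source): the commit
    // check stage below can fail with TransactionException codes
    // DIRTY_TABLE_SELECT (a selected table has committed changes),
    // DUPLICATE_TABLE (namespace create/drop clash), TABLE_DROPPED (a changed
    // table is no longer committed) and TABLE_REMOVE_CLASH (a dropped table
    // has later modifications); each of these aborts the commit.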
2450    // Get individual journals for updates made to tables in this
2451    // transaction.
2452    // The list MasterTableJournal
2453    ArrayList journal_list = new ArrayList();
2454    for (int i = 0; i < touched_tables.size(); ++i) {
2455      MasterTableJournal table_journal =
2456                 ((MutableTableDataSource) touched_tables.get(i)).getJournal();
2457      if (table_journal.entries() > 0) { // Check the journal has entries.
2458        journal_list.add(table_journal);
2459      }
2460    }
2461    MasterTableJournal[] changed_tables =
2462                (MasterTableJournal[]) journal_list.toArray(
2463                                  new MasterTableJournal[journal_list.size()]);
2464
2465    // The list of tables created by this journal.
2466    IntegerVector created_tables = journal.getTablesCreated();
2467    // The list of tables dropped by this journal.
2468    IntegerVector dropped_tables = journal.getTablesDropped();
2469    // The list of tables that constraints were altered by this journal
2470    IntegerVector constraint_altered_tables =
2471                         journal.getTablesConstraintAltered();
2472
2473    // Exit early if nothing changed (this is a read-only transaction)
2474    if (changed_tables.length == 0 &&
2475        created_tables.size() == 0 && dropped_tables.size() == 0 &&
2476        constraint_altered_tables.size() == 0) {
2477      closeTransaction(transaction);
2478      return;
2479    }
2480
2481    // This flag is set to true when entries from the changes tables are
2482    // at a point of no return. If this is false it is safe to rollback
2483    // changes if necessary.
2484    boolean entries_committed = false;
2485
2486    // The tables that were actually changed (MasterTableDataSource)
2487    ArrayList changed_tables_list = new ArrayList();
2488
2489    // Grab the commit lock.
2490    synchronized (commit_lock) {
2491
2492      // Get the list of all database objects that were created in the
2493      // transaction.
2494      ArrayList database_objects_created = transaction.getAllNamesCreated();
2495      // Get the list of all database objects that were dropped in the
2496      // transaction.
2497      ArrayList database_objects_dropped = transaction.getAllNamesDropped();
2498
2499      // This is a transaction that will represent the view of the database
2500      // at the end of the commit
2501      Transaction check_transaction = null;
2502
2503      try {
2504
2505        // ---- Commit check stage ----
2506

2507        long tran_commit_id = transaction.getCommitID();
2508
2509        // We only perform this check if transaction error on dirty selects
2510        // are enabled.
2511        if (transaction.transactionErrorOnDirtySelect()) {
2512
2513          // For each table that this transaction selected from, if there are
2514          // any committed changes then generate a transaction error.
2515          for (int i = 0; i < selected_from_tables.size(); ++i) {
2516            MasterTableDataSource selected_table =
2517                          (MasterTableDataSource) selected_from_tables.get(i);
2518            // Find all committed journals equal to or greater than this
2519            // transaction's commit_id.
2520            MasterTableJournal[] journals_since =
2521                          selected_table.findAllJournalsSince(tran_commit_id);
2522            if (journals_since.length > 0) {
2523              // Yes, there are changes so generate transaction error and
2524              // rollback.
2525              throw new TransactionException(
2526                TransactionException.DIRTY_TABLE_SELECT,
2527                    "Concurrent Serializable Transaction Conflict(4): " +
2528                    "Select from table that has committed changes: " +
2529                    selected_table.getName());
2530            }
2531          }
2532        }
2533
2534        // Check there isn't a namespace clash with database objects.
2535        // We need to create a list of all create and drop activity in the
2536        // conglomerate from when the transaction started.
2537        ArrayList all_dropped_obs = new ArrayList();
2538        ArrayList all_created_obs = new ArrayList();
2539        int nsj_sz = namespace_journal_list.size();
2540        for (int i = 0; i < nsj_sz; ++i) {
2541          NameSpaceJournal ns_journal =
2542                             (NameSpaceJournal) namespace_journal_list.get(i);
2543          if (ns_journal.commit_id >= tran_commit_id) {
2544            all_dropped_obs.addAll(ns_journal.dropped_names);
2545            all_created_obs.addAll(ns_journal.created_names);
2546          }
2547        }
2548
2549        // The list of all dropped objects since this transaction
2550        // began.
2551        int ado_sz = all_dropped_obs.size();
2552        boolean conflict5 = false;
2553        Object conflict_name = null;
2554        String conflict_desc = "";
2555        for (int n = 0; n < ado_sz; ++n) {
2556          if (database_objects_dropped.contains(all_dropped_obs.get(n))) {
2557            conflict5 = true;
2558            conflict_name = all_dropped_obs.get(n);
2559            conflict_desc = "Drop Clash";
2560          }
2561        }
2562        // The list of all created objects since this transaction
2563        // began.
2564        int aco_sz = all_created_obs.size();
2565        for (int n = 0; n < aco_sz; ++n) {
2566          if (database_objects_created.contains(all_created_obs.get(n))) {
2567            conflict5 = true;
2568            conflict_name = all_created_obs.get(n);
2569            conflict_desc = "Create Clash";
2570          }
2571        }
2572        if (conflict5) {
2573          // Namespace conflict...
2574          throw new TransactionException(
2575               TransactionException.DUPLICATE_TABLE,
2576               "Concurrent Serializable Transaction Conflict(5): " +
2577               "Namespace conflict: " + conflict_name.toString() + " " +
2578               conflict_desc);
2579        }
2580
2581        // For each journal,
2582        for (int i = 0; i < changed_tables.length; ++i) {
2583          MasterTableJournal change_journal = changed_tables[i];
2584          // The table the change was made to.
2585          int table_id = change_journal.getTableID();
2586          // Get the master table with this table id.
2587          MasterTableDataSource master = getMasterTable(table_id);
2588
2589          // True if the state contains a committed resource with the given name
2590          boolean committed_resource =
2591                               state_store.containsVisibleResource(table_id);
2592          
2593          // Check this table is still in the committed tables list.
2594          if (!created_tables.contains(table_id) &&
2595              !committed_resource) {
2596            // This table is no longer a committed table, so rollback
2597            throw new TransactionException(
2598                  TransactionException.TABLE_DROPPED,
2599                  "Concurrent Serializable Transaction Conflict(2): " +
2600                  "Table altered/dropped: " + master.getName());
2601          }
2602
2603          // Since this journal was created, check to see if any changes to the
2604          // tables have been committed since.
2605          // This will return all journals on the table with the same commit_id
2606          // or greater.
2607          MasterTableJournal[] journals_since =
2608                                 master.findAllJournalsSince(tran_commit_id);
2609
2610          // For each journal, determine if there's any clashes.
2611          for (int n = 0; n < journals_since.length; ++n) {
2612            // This will throw an exception if a commit clashes.
2613            change_journal.testCommitClash(master.getDataTableDef(),
2614                                           journals_since[n]);
2615          }
2616
2617        }
2618
2619        // Look at the transaction journal, if a table is dropped that has
2620        // journal entries since the last commit then we have an exception
2621        // case.
2622        for (int i = 0; i < dropped_tables.size(); ++i) {
2623          int table_id = dropped_tables.intAt(i);
2624          // Get the master table with this table id.
2625          MasterTableDataSource master = getMasterTable(table_id);
2626          // Any journal entries made to this dropped table?
2627          if (master.findAllJournalsSince(tran_commit_id).length > 0) {
2628            // Oops, yes, rollback!
2629            throw new TransactionException(
2630                  TransactionException.TABLE_REMOVE_CLASH,
2631                  "Concurrent Serializable Transaction Conflict(3): " +
2632                  "Dropped table has modifications: " + master.getName());
2633          }
2634        }
2635
2636        // Tests passed so go on to commit,
2637

2638        // ---- Commit stage ----
2639

2640        // Create a normalized list of MasterTableDataSource of all tables that
2641        // were either changed (and not dropped), and created (and not dropped).
2642        // This list represents all tables that are either new or changed in
2643        // this transaction.
2644
2645        final int created_tables_count = created_tables.size();
2646        final int changed_tables_count = changed_tables.length;
2647        final ArrayList normalized_changed_tables = new ArrayList(8);
2648        // Add all tables that were changed and not dropped in this transaction.
2649        for (int i = 0; i < changed_tables_count; ++i) {
2650          MasterTableJournal table_journal = changed_tables[i];
2651          // The table the changes were made to.
2652          int table_id = table_journal.getTableID();
2653          // If this table is not dropped in this transaction and is not
2654          // already in the normalized list then add it.
2655          if (!dropped_tables.contains(table_id)) {
2656            MasterTableDataSource master_table = getMasterTable(table_id);
2657
2658            CommitTableInfo table_info = new CommitTableInfo();
2659            table_info.master = master_table;
2660            table_info.journal = table_journal;
2661            table_info.changes_since_commit =
2662                           master_table.findAllJournalsSince(tran_commit_id);
2663            
2664            normalized_changed_tables.add(table_info);
2665          }
2666        }
2667
2668        // Add all tables that were created and not dropped in this transaction.
2669        for (int i = 0; i < created_tables_count; ++i) {
2670          int table_id = created_tables.intAt(i);
2671          // If this table is not dropped in this transaction then this is a
2672          // new table in this transaction.
2673          if (!dropped_tables.contains(table_id)) {
2674            MasterTableDataSource master_table = getMasterTable(table_id);
2675            if (!commitTableListContains(normalized_changed_tables,
2676                                         master_table)) {
2677
2678              // This is for entries that are created but not modified (no journal).
2679              CommitTableInfo table_info = new CommitTableInfo();
2680              table_info.master = master_table;
2681            
2682              normalized_changed_tables.add(table_info);
2683            }
2684          }
2685        }
2686
2687        // The final size of the normalized changed tables list
2688        final int norm_changed_tables_count = normalized_changed_tables.size();
2689        
2690        // Create a normalized list of MasterTableDataSource of all tables that
2691        // were dropped (and not created) in this transaction. This list
2692        // represents tables that will be dropped if the transaction
2693        // successfully commits.
2694
2695        final int dropped_tables_count = dropped_tables.size();
2696        final ArrayList normalized_dropped_tables = new ArrayList(8);
2697        for (int i = 0; i < dropped_tables_count; ++i) {
2698          // The dropped table
2699          int table_id = dropped_tables.intAt(i);
2700          // Was this dropped table also created? If it was created in this
2701          // transaction then we don't care about it.
2702          if (!created_tables.contains(table_id)) {
2703            MasterTableDataSource master_table = getMasterTable(table_id);
2704            normalized_dropped_tables.add(master_table);
2705          }
2706        }
2707
2708        // We now need to create a SimpleTransaction object that we
2709        // use to send to the triggering mechanism. This
2710        // SimpleTransaction represents a very specific view of the
2711        // transaction. This view contains the latest version of changed
2712        // tables in this transaction. It also contains any tables that have
2713        // been created by this transaction and does not contain any tables
2714        // that have been dropped. Any tables that have not been touched by
2715        // this transaction are shown in their current committed state.
2716        // To summarize - this view is the current view of the database plus
2717        // any modifications made by the transaction that is being committed.
2718
2719        // How this works - All changed tables are merged with the current
2720        // committed table. All created tables are added into check_transaction
2721        // and all dropped tables are removed from check_transaction. If
2722        // there were no other changes to a table between the time the
2723        // transaction was created and now, the view of the table in the
2724        // transaction is used, otherwise the latest changes are merged.
2725
2726        // Note that this view will be the view that the database will
2727        // ultimately become if this transaction successfully commits. Also,
2728        // you should appreciate that this view is NOT exactly the same as
2729        // the current transaction view because any changes that have been
2730        // committed by concurrent transactions will be reflected in this view.
2731
2732        // Create a new transaction of the database which will represent the
2733        // committed view if this commit is successful.
2734        check_transaction = createTransaction();
2735
2736        // Overwrite this view with tables from this transaction that have
2737        // changed or have been added or dropped.
2738
2739        // (Note that order here is important). First drop any tables from
2740        // this view.
2741        for (int i = 0; i < normalized_dropped_tables.size(); ++i) {
2742          // Get the table
2743          MasterTableDataSource master_table =
2744                     (MasterTableDataSource) normalized_dropped_tables.get(i);
2745          // Drop this table in the current view
2746          check_transaction.removeVisibleTable(master_table);
2747        }
2748
2749        // Now add any changed tables to the view.
2750

2751        // Represents view of the changed tables
2752        TableDataSource[] changed_table_source =
2753                               new TableDataSource[norm_changed_tables_count];
2754        // Set up the above arrays
2755        for (int i = 0; i < norm_changed_tables_count; ++i) {
2756
2757          // Get the information for this changed table
2758          CommitTableInfo table_info =
2759                            (CommitTableInfo) normalized_changed_tables.get(i);
2760
2761          // Get the master table that changed from the normalized list.
2762          MasterTableDataSource master = table_info.master;
2763          // Did this table change since the transaction started?
2764          MasterTableJournal[] all_table_changes =
2765                                               table_info.changes_since_commit;
2766
2767          if (all_table_changes == null || all_table_changes.length == 0) {
2768            // No changes so we can pick the correct IndexSet from the current
2769            // transaction.
2770
2771            // Get the state of the changed tables from the Transaction
2772            MutableTableDataSource mtable =
2773                                   transaction.getTable(master.getTableName());
2774            // Get the current index set of the changed table
2775            table_info.index_set = transaction.getIndexSetForTable(master);
2776            // Flush all index changes in the table
2777            mtable.flushIndexChanges();
2778
2779            // Set the 'check_transaction' object with the latest version of the
2780            // table.
2781            check_transaction.updateVisibleTable(table_info.master,
2782                                                 table_info.index_set);
2783
2784          }
2785          else {
2786            // There were changes so we need to merge the changes with the
2787            // current view of the table.
2788
2789            // It's not immediately obvious how this merge update works, but
2790            // basically what happens is we put the table journal with all the
2791            // changes into a new MutableTableDataSource of the current
2792            // committed state, and then we flush all the changes into the
2793            // index and then update the 'check_transaction' with this change.
2794
2795            // Create the MutableTableDataSource with the changes from this
2796            // journal.
2797            MutableTableDataSource mtable =
2798                      master.createTableDataSourceAtCommit(check_transaction,
2799                                                           table_info.journal);
2800            // Get the current index set of the changed table
2801            table_info.index_set =
2802                                 check_transaction.getIndexSetForTable(master);
2803            // Flush all index changes in the table
2804            mtable.flushIndexChanges();
2805
2806            // Dispose the table
2807            mtable.dispose();
2808
2809          }
2810
2811          // And now refresh the 'changed_table_source' entry
2812          changed_table_source[i] =
2813                           check_transaction.getTable(master.getTableName());
2814
2815        }
2816
2817        // The 'check_transaction' now represents the view the database will be
2818        // if the commit succeeds. We lock 'check_transaction' so it is
2819        // read-only (the view is immutable).
2820        check_transaction.setReadOnly();
2821        
2822        // Any tables that the constraints were altered for we need to check
2823        // if any rows in the table violate the new constraints.
2824        for (int i = 0; i < constraint_altered_tables.size(); ++i) {
2825          // We need to check there are no constraint violations for all the
2826
// rows in the table.
2827
int table_id = constraint_altered_tables.intAt(i);
2828          for (int n = 0; n < norm_changed_tables_count; ++n) {
2829            CommitTableInfo table_info =
2830                            (CommitTableInfo) normalized_changed_tables.get(n);
2831            if (table_info.master.getTableID() == table_id) {
2832              checkAllAddConstraintViolations(check_transaction,
2833                            changed_table_source[n],
2834                            Transaction.INITIALLY_DEFERRED);
2835            }
2836          }
2837        }
2838
2839        // For each changed table we must determine the rows that
2840
// were deleted and perform the remove constraint checks on the
2841
// deleted rows. Note that this happens after the records are
2842
// removed from the index.
2843

2844        // For each changed table,
2845
for (int i = 0; i < norm_changed_tables_count; ++i) {
2846          CommitTableInfo table_info =
2847                            (CommitTableInfo) normalized_changed_tables.get(i);
2848          // Get the journal that details the change to the table.
2849
MasterTableJournal change_journal = table_info.journal;
2850          if (change_journal != null) {
2851            // Find the normalized deleted rows.
2852
int[] normalized_removed_rows =
2853                                      change_journal.normalizedRemovedRows();
2854            // Check removing any of the data doesn't cause a constraint
2855
// violation.
2856
checkRemoveConstraintViolations(check_transaction,
2857                       changed_table_source[i], normalized_removed_rows,
2858                       Transaction.INITIALLY_DEFERRED);
2859
2860            // Find the normalized added rows.
2861
int[] normalized_added_rows =
2862                                      change_journal.normalizedAddedRows();
2863            // Check adding any of the data doesn't cause a constraint
2864
// violation.
2865
checkAddConstraintViolations(check_transaction,
2866                                changed_table_source[i], normalized_added_rows,
2867                                Transaction.INITIALLY_DEFERRED);
2868
2869            // Set up the list of added and removed rows
2870
table_info.norm_added_rows = normalized_added_rows;
2871            table_info.norm_removed_rows = normalized_removed_rows;
2872
2873          }
2874        }
2875
2876        // Deferred trigger events.
2877
// For each changed table.
2878
n_loop:
2879        for (int i = 0; i < norm_changed_tables_count; ++i) {
2880          CommitTableInfo table_info =
2881                            (CommitTableInfo) normalized_changed_tables.get(i);
2882          // Get the journal that details the change to the table.
2883
MasterTableJournal change_journal = table_info.journal;
2884          if (change_journal != null) {
2885            // Get the table name
2886
TableName table_name = table_info.master.getTableName();
2887            // The list of listeners to dispatch this event to
2888
TransactionModificationListener[] listeners;
2889            // Are there any listeners listening for events on this table?
2890
synchronized (modification_listeners) {
2891              ArrayList JavaDoc list =
2892                            (ArrayList JavaDoc) modification_listeners.get(table_name);
2893              if (list == null || list.size() == 0) {
2894                // If no listeners on this table, continue to the next
2895
// table that was changed.
2896
continue n_loop;
2897              }
2898              // Generate the list of listeners,
2899
listeners = (TransactionModificationListener[]) list.toArray(
2900                            new TransactionModificationListener[list.size()]);
2901            }
2902            // Generate the event
2903
TableCommitModificationEvent event =
2904                      new TableCommitModificationEvent(
2905                                check_transaction, table_name,
2906                                table_info.norm_added_rows,
2907                                table_info.norm_removed_rows);
2908            // Fire this event on the listeners
2909
for (int n = 0; n < listeners.length; ++n) {
2910              listeners[n].tableCommitChange(event);
2911            }
2912
2913          } // if (change_journal != null)
2914
} // for each changed table
2915

2916        // NOTE: This isn't as fail safe as it could be. We really need to
2917
// do the commit in two phases. The first writes updated indices to
2918
// the index files. The second updates the header pointer for the
2919
// respective table. Perhaps we can make the header update
2920
// procedure just one file write.
2921

2922        // Finally, at this point all constraint checks have passed and the
2923
// changes are ready to finally be committed as permanent changes
2924
// to the conglomerate. All that needs to be done is to commit our
2925
// IndexSet indices for each changed table as final.
2926
// ISSUE: Should we separate the 'committing of indexes' changes and
2927
// 'committing of delete/add flags' to make the FS more robust?
2928
// It would be more robust if all indexes are committed in one go,
2929
// then all table flag data.
2930

2931        // Set flag to indicate we have committed entries.
2932
entries_committed = true;
2933
2934        // For each change to each table,
2935
for (int i = 0; i < norm_changed_tables_count; ++i) {
2936          CommitTableInfo table_info =
2937                            (CommitTableInfo) normalized_changed_tables.get(i);
2938          // Get the journal that details the change to the table.
2939
MasterTableJournal change_journal = table_info.journal;
2940          if (change_journal != null) {
2941            // Get the master table with this table id.
2942
MasterTableDataSource master = table_info.master;
2943            // Commit the changes to the table.
2944
// We use 'this.commit_id' which is the current commit level we are
2945
// at.
2946
master.commitTransactionChange(this.commit_id, change_journal,
2947                                           table_info.index_set);
2948            // Add to 'changed_tables_list'
2949
changed_tables_list.add(master);
2950          }
2951        }
2952
2953        // Only do this if we've created or dropped tables.
2954
if (created_tables.size() > 0 || dropped_tables.size() > 0) {
2955          // Update the committed tables in the conglomerate state.
2956
// This will update and synchronize the headers in this conglomerate.
2957
commitToTables(created_tables, dropped_tables);
2958        }
2959
2960        // Update the namespace clash list
2961
if (database_objects_created.size() > 0 ||
2962            database_objects_dropped.size() > 0) {
2963          NameSpaceJournal namespace_journal =
2964                   new NameSpaceJournal(tran_commit_id,
2965                                        database_objects_created,
2966                                        database_objects_dropped);
2967          namespace_journal_list.add(namespace_journal);
2968        }
2969
2970      }
2971      finally {
2972
2973        try {
2974
2975          // If entries_committed == false it means we didn't get to a point
2976
// where any changed tables were committed. Attempt to rollback the
2977
// changes in this transaction if they haven't been committed yet.
2978
if (entries_committed == false) {
2979            // For each change to each table,
2980
for (int i = 0; i < changed_tables.length; ++i) {
2981              // Get the journal that details the change to the table.
2982
MasterTableJournal change_journal = changed_tables[i];
2983              // The table the changes were made to.
2984
int table_id = change_journal.getTableID();
2985              // Get the master table with this table id.
2986
MasterTableDataSource master = getMasterTable(table_id);
2987              // Commit the rollback on the table.
2988
master.rollbackTransactionChange(change_journal);
2989            }
2990            if (Debug().isInterestedIn(Lvl.INFORMATION)) {
2991              Debug().write(Lvl.INFORMATION, this,
2992                          "Rolled back transaction changes in a commit.");
2993            }
2994          }
2995
2996        }
2997        finally {
2998          try {
2999            // Dispose the 'check_transaction'
3000
if (check_transaction != null) {
3001              check_transaction.dispose();
3002              closeTransaction(check_transaction);
3003            }
3004            // Always ensure a transaction close, even if we have an exception.
3005
// Notify the conglomerate that this transaction has closed.
3006
closeTransaction(transaction);
3007          }
3008          catch (Throwable JavaDoc e) {
3009            Debug().writeException(e);
3010          }
3011        }
3012
3013      }
3014
3015      // Flush the journals up to the minimum commit id for all the tables
3016
// that this transaction changed.
3017
long min_commit_id = open_transactions.minimumCommitID(null);
3018      int chsz = changed_tables_list.size();
3019      for (int i = 0; i < chsz; ++i) {
3020        MasterTableDataSource master =
3021                            (MasterTableDataSource) changed_tables_list.get(i);
3022        master.mergeJournalChanges(min_commit_id);
3023      }
3024      int nsjsz = namespace_journal_list.size();
3025      for (int i = nsjsz - 1; i >= 0; --i) {
3026        NameSpaceJournal namespace_journal =
3027                              (NameSpaceJournal) namespace_journal_list.get(i);
3028        // Remove if the commit id for the journal is less than the minimum
3029
// commit id
3030
if (namespace_journal.commit_id < min_commit_id) {
3031          namespace_journal_list.remove(i);
3032        }
3033      }
3034
3035      // Set a check point in the store system. This means that the
3036
// persistance state is now stable.
3037
store_system.setCheckPoint();
3038
3039    } // synchronized (commit_lock)
3040

3041  }
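
  // Illustrative sketch (not part of the original source): the deferred
  // trigger dispatch in the commit path above hands a
  // TableCommitModificationEvent -- built from 'check_transaction', the
  // table name, and the normalized added/removed row lists -- to every
  // TransactionModificationListener registered against that table's
  // TableName. A listener could observe commits roughly as follows. The
  // registration method named here is an assumption about how the
  // 'modification_listeners' map is populated elsewhere in this class, and
  // the listener interface may declare further callbacks that a real
  // implementation would also have to provide.
  //
  //   TransactionModificationListener commit_watcher =
  //       new TransactionModificationListener() {
  //         public void tableCommitChange(TableCommitModificationEvent evt) {
  //           // React to the rows the committed transaction added/removed.
  //         }
  //         // ... any other callbacks declared by the interface ...
  //       };
  //   // Assumed registration call, against a hypothetical table name:
  //   conglomerate.addTransactionModificationListener(
  //                  new TableName("APP", "Orders"), commit_watcher);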

  /**
   * Rolls back a transaction and invalidates any changes that the transaction
   * made to the database. The rows that this transaction changed are given
   * up as freely available rows. This is called by the 'closeAndRollback'
   * method in Transaction.
   */
  void processRollback(Transaction transaction,
                       ArrayList touched_tables, TransactionJournal journal) {

    // Go through the journal. Any rows added should be marked as deleted
    // in the respective master table.

    // Get individual journals for updates made to tables in this
    // transaction.
    // The list MasterTableJournal
    ArrayList journal_list = new ArrayList();
    for (int i = 0; i < touched_tables.size(); ++i) {
      MasterTableJournal table_journal =
                 ((MutableTableDataSource) touched_tables.get(i)).getJournal();
      if (table_journal.entries() > 0) {   // Check the journal has entries.
        journal_list.add(table_journal);
      }
    }
    MasterTableJournal[] changed_tables =
                (MasterTableJournal[]) journal_list.toArray(
                                  new MasterTableJournal[journal_list.size()]);

    // The list of tables created by this journal.
    IntegerVector created_tables = journal.getTablesCreated();

    synchronized (commit_lock) {

      try {

        // For each change to each table,
        for (int i = 0; i < changed_tables.length; ++i) {
          // Get the journal that details the change to the table.
          MasterTableJournal change_journal = changed_tables[i];
          // The table the changes were made to.
          int table_id = change_journal.getTableID();
          // Get the master table with this table id.
          MasterTableDataSource master = getMasterTable(table_id);
          // Commit the rollback on the table.
          master.rollbackTransactionChange(change_journal);
        }

      }
      finally {
        // Notify the conglomerate that this transaction has closed.
        closeTransaction(transaction);
      }
    }
  }
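
  // Illustrative sketch (not part of the original source): processRollback is
  // not called by client code directly; as the comment above notes, it is
  // invoked from the 'closeAndRollback' method in Transaction, which supplies
  // the transaction's touched MutableTableDataSource list and its
  // TransactionJournal. From a caller's point of view the flow is roughly
  // the following; apart from 'closeAndRollback', treat the method names,
  // signatures and checked exceptions as assumptions.
  //
  //   Transaction t = conglomerate.createTransaction();
  //   // ... make changes through 't' ...
  //   if (keep_changes) {
  //     t.closeAndCommit();      // drives the commit logic above; may throw
  //                              // a transaction conflict exception
  //   }
  //   else {
  //     t.closeAndRollback();    // ends up in processRollback above
  //   }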

  // -----

  /**
   * Updates the currently committed list of tables in this conglomerate with
   * the given lists of created and dropped table ids. This makes the change
   * permanent by updating the state file also.
   * <p>
   * This should be called as part of a transaction commit.
   */
  private void commitToTables(
                  IntegerVector created_tables, IntegerVector dropped_tables) {

    // Add created tables to the committed tables list.
    for (int i = 0; i < created_tables.size(); ++i) {
      // For all created tables, add to the visible list and remove from the
      // delete list in the state store.
      MasterTableDataSource t = getMasterTable(created_tables.intAt(i));
      StateResource resource =
                 new StateResource(t.getTableID(), createEncodedTableFile(t));
      state_store.addVisibleResource(resource);
      state_store.removeDeleteResource(resource.name);
    }

    // Remove dropped tables from the committed tables list.
    for (int i = 0; i < dropped_tables.size(); ++i) {
      // For all dropped tables, add to the delete list and remove from the
      // visible list in the state store.
      MasterTableDataSource t = getMasterTable(dropped_tables.intAt(i));
      StateResource resource =
                 new StateResource(t.getTableID(), createEncodedTableFile(t));
      state_store.addDeleteResource(resource);
      state_store.removeVisibleResource(resource.name);
    }

    try {
      state_store.commit();
    }
    catch (IOException e) {
      Debug().writeException(e);
      throw new Error("IO Error: " + e.getMessage());
    }
  }

  /**
   * Returns the MasterTableDataSource in this conglomerate with the given
   * table id.
   */
  MasterTableDataSource getMasterTable(int table_id) {
    synchronized (commit_lock) {
      // Find the table with this table id.
      for (int i = 0; i < table_list.size(); ++i) {
        MasterTableDataSource t = (MasterTableDataSource) table_list.get(i);
        if (t.getTableID() == table_id) {
          return t;
        }
      }
      throw new Error("Unable to find an open table with id: " + table_id);
    }
  }

  /**
   * Creates a table store in this conglomerate with the given name and
   * returns a reference to the table. Note that this table is not a
   * committed change to the system. It is a free standing blank table store.
   * The table returned here is uncommitted and will be deleted unless it is
   * committed.
   * <p>
   * Note that two tables may exist within a conglomerate with the same name,
   * however each <b>committed</b> table must have a unique name.
   * <p>
   * @param table_def the table definition.
   * @param data_sector_size the size of the data sectors (affects performance
   *   and size of the file).
   * @param index_sector_size the size of the index sectors.
   */
  MasterTableDataSource createMasterTable(DataTableDef table_def,
                                int data_sector_size, int index_sector_size) {
    synchronized (commit_lock) {
      try {

        // EFFICIENCY: Currently this writes to the conglomerate state file
        //   twice. Once in 'nextUniqueTableID' and once in
        //   'state_store.commit'.

        // The unique id that identifies this table,
        int table_id = nextUniqueTableID();

        // Create the object.
        V2MasterTableDataSource master_table =
            new V2MasterTableDataSource(getSystem(),
                 storeSystem(), open_transactions, blob_store);
        master_table.create(table_id, table_def);

        // Add to the list of all tables.
        table_list.add(master_table);

        // Add this to the list of deleted tables,
        // (This should really be renamed to uncommitted tables).
        markAsCommittedDropped(table_id);

        // Commit this
        state_store.commit();

        // And return it.
        return master_table;

      }
      catch (IOException e) {
        Debug().writeException(e);
        throw new Error("Unable to create master table '" +
                        table_def.getName() + "' - " + e.getMessage());
      }
    }

  }
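
  // Illustrative sketch (not part of the original source): a master table
  // created here is free standing and initially recorded via
  // markAsCommittedDropped, so it only becomes a visible, committed table if
  // a later transaction commit moves it onto the visible list (see
  // commitToTables above); otherwise it is eligible for deletion. Assuming a
  // DataTableDef that has already been populated with a table name and
  // columns, the call is simply:
  //
  //   DataTableDef def = ...;    // table name and columns set up elsewhere
  //   MasterTableDataSource t =
  //         conglomerate.createMasterTable(def, data_sector_size,
  //                                        index_sector_size);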

  /**
   * Creates a table store in this conglomerate that is an exact copy of the
   * given MasterTableDataSource. Note that this table is not a committed
   * change to the system. It is a free standing blank table store. The table
   * returned here is uncommitted and will be deleted unless it is committed.
   * <p>
   * Note that two tables may exist within a conglomerate with the same name,
   * however each <b>committed</b> table must have a unique name.
   * <p>
   * @param src_master_table the source master table to copy.
   * @param index_set the view of the table index to copy.
   * @return the MasterTableDataSource with the copied information.
   */
  MasterTableDataSource copyMasterTable(
                 MasterTableDataSource src_master_table, IndexSet index_set) {
    synchronized (commit_lock) {
      try {

        // EFFICIENCY: Currently this writes to the conglomerate state file
        //   twice. Once in 'nextUniqueTableID' and once in
        //   'state_store.commit'.

        // The unique id that identifies this table,
        int table_id = nextUniqueTableID();

        // Create the object.
        V2MasterTableDataSource master_table =
            new V2MasterTableDataSource(getSystem(),
                 storeSystem(), open_transactions, blob_store);

        master_table.copy(table_id, src_master_table, index_set);

        // Add to the list of all tables.
        table_list.add(master_table);

        // Add this to the list of deleted tables,
        // (This should really be renamed to uncommitted tables).
        markAsCommittedDropped(table_id);

        // Commit this
        state_store.commit();

        // And return it.
        return master_table;

      }
      catch (IOException e) {
        Debug().writeException(e);
        throw new RuntimeException("Unable to copy master table '" +
                        src_master_table.getDataTableDef().getName() +
                        "' - " + e.getMessage());
      }
    }

  }
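
  // Illustrative sketch (not part of the original source): copyMasterTable
  // needs a consistent IndexSet view of the source table's indices alongside
  // the source MasterTableDataSource itself. Given a transaction that can see
  // the source table, the call pattern is roughly the following; which
  // higher-level operations actually use this path is not stated in this
  // file, so treat that as an open question rather than a fact.
  //
  //   IndexSet view = transaction.getIndexSetForTable(src_master_table);
  //   MasterTableDataSource copy =
  //                   conglomerate.copyMasterTable(src_master_table, view);
  //   // Like createMasterTable, the copy stays uncommitted until a commit
  //   // makes it visible.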

  // ---------- Inner classes ----------

  /**
   * A journal for handling namespace clashes between transactions. For
   * example, we would need to generate a conflict if two concurrent
   * transactions were to drop the same table, or if a procedure and a
   * table with the same name were generated in concurrent transactions.
   */
  private static class NameSpaceJournal {

    /**
     * The commit_id of this journal entry.
     */
    long commit_id;

    /**
     * The list of names created in this journal.
     */
    ArrayList created_names;

    /**
     * The list of names dropped in this journal.
     */
    ArrayList dropped_names;

    /**
     * Constructs the journal.
     */
    NameSpaceJournal(long commit_id,
                     ArrayList created_names, ArrayList dropped_names) {
      this.commit_id = commit_id;
      this.created_names = created_names;
      this.dropped_names = dropped_names;
    }

  }
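
  // Illustrative sketch (not part of the original source): entries of this
  // class accumulate in 'namespace_journal_list' (see the commit logic above)
  // so that a committing transaction can detect clashes such as two
  // concurrent transactions dropping the same table. Conceptually the test is
  // an overlap check against journals recorded by commits that happened since
  // the transaction's snapshot, roughly:
  //
  //   // 'other' is a NameSpaceJournal from namespace_journal_list,
  //   // 'my_dropped_names' is this transaction's dropped-objects list.
  //   for (int n = 0; n < other.dropped_names.size(); ++n) {
  //     if (my_dropped_names.contains(other.dropped_names.get(n))) {
  //       // namespace clash: the same object was dropped concurrently
  //     }
  //   }
  //
  // The real check lives elsewhere in this class and may differ in detail;
  // this is only a paraphrase of the intent described in the comment above.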

//  // ---------- Shutdown hook ----------
//
//  /**
//   * This is a thread that is started when the shutdown hook for this
//   * conglomerate is executed. It goes through each table in the
//   * conglomerate and attempts to lock the 'writeLockedObject' for each
//   * table. When all the objects are locked it goes into a wait state.
//   */
//  private class ConglomerateShutdownHookThread extends Thread {
//    private boolean complete = false;
//
//    ConglomerateShutdownHookThread() {
//      setName("Mckoi - JVM Shutdown Hook");
//    }
//
//    public synchronized void run() {
//      // Synchronize over the commit_lock object
//      synchronized (commit_lock) {
//        if (table_list != null) {
////          System.out.println("Cleanup on: " + TableDataConglomerate.this);
//          for (int i = 0; i < table_list.size(); ++i) {
//            MasterTableDataSource master =
//                                  (MasterTableDataSource) table_list.get(i);
////            System.out.println("CLEANUP: " + master);
//            master.shutdownHookCleanup();
//          }
//        }
//      }
//      complete = true;
//      notifyAll();
//    }
//
//    public synchronized void waitUntilComplete() {
//      try {
//        while (!complete) {
//          wait();
//        }
//      }
//      catch (InterruptedException e) { /* ignore */ }
//    }
//
//  }

  public void finalize() {
//    removeShutdownHook();
  }

}