package com.mckoi.database;

import com.mckoi.store.*;
import com.mckoi.debug.*;
import com.mckoi.util.ByteArrayUtil;
import com.mckoi.util.IntegerListInterface;
import com.mckoi.util.UserTerminal;
import com.mckoi.database.global.ObjectTransfer;
import com.mckoi.database.global.StringObject;
import com.mckoi.database.global.ClobRef;
import com.mckoi.database.global.Ref;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.ArrayList;
import java.util.List;
import java.io.*;

/**
 * A MasterTableDataSource implementation that is backed by a single Store
 * object.  The table definition, the index set definition, the record data
 * and the index data are all persisted inside the one store.
 */
public final class V2MasterTableDataSource extends MasterTableDataSource {

  /**
   * The file name of the store that backs this table.
   */
  private String file_name;

  /**
   * The backing store.
   */
  private Store store;

  /**
   * The store that manages the index sets for this table.
   */
  private IndexSetStore index_store;

  /**
   * The current unique id sequence value for this table.
   */
  private long sequence_id;

  /**
   * Pointer to the index header area.
   */
  private long index_header_p;

  /**
   * Pointer to the list header area.
   */
  private long list_header_p;

  /**
   * The header area of the table.
   */
  private MutableArea header_area;

  /**
   * The fixed size record list that maps row numbers to record pointers.
   */
  private FixedRecordList list_structure;

  /**
   * The first record in the chain of deleted (free) records, or -1 if the
   * chain is empty.
   */
  private long first_delete_chain_record;

  /**
   * Set to true once the VM shutdown hook has cleaned up this table.
   */
  private boolean has_shutdown;


  /**
   * The Constructor.
   */
  public V2MasterTableDataSource(TransactionSystem system,
                                 StoreSystem store_system,
                                 OpenTransactionList open_transactions,
                                 BlobStoreInterface blob_store_interface) {
    super(system, store_system, open_transactions, blob_store_interface);
    first_delete_chain_record = -1;
    has_shutdown = false;
  }

  /**
   * Wraps the given OutputStream with a buffered DataOutputStream.
   */
  private static DataOutputStream getDOut(OutputStream out) {
    return new DataOutputStream(new BufferedOutputStream(out, 512));
  }

  /**
   * Wraps the given InputStream with a buffered DataInputStream.
   */
  private static DataInputStream getDIn(InputStream in) {
    return new DataInputStream(new BufferedInputStream(in, 512));
  }

  /**
   * Sets up an initial (empty) state for the store.
   */
  private void setupInitialStore() throws IOException {
    // Serialize the DataTableDef object
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    DataOutputStream dout = new DataOutputStream(bout);
    dout.writeInt(1);
    getDataTableDef().write(dout);
    byte[] data_table_def_buf = bout.toByteArray();

    // Serialize the DataIndexSetDef object
    bout = new ByteArrayOutputStream();
    dout = new DataOutputStream(bout);
    dout.writeInt(1);
    getDataIndexSetDef().write(dout);
    byte[] index_set_def_buf = bout.toByteArray();

    bout = null;
    dout = null;

    try {
      store.lockForWrite();

      // Allocate the header, DataTableDef and DataIndexSetDef areas
      AreaWriter header_writer = store.createArea(80);
      long header_p = header_writer.getID();
      AreaWriter data_table_def_writer =
                                 store.createArea(data_table_def_buf.length);
      long data_table_def_p = data_table_def_writer.getID();
      AreaWriter data_index_set_writer =
                                  store.createArea(index_set_def_buf.length);
      long data_index_set_def_p = data_index_set_writer.getID();

      // Create the empty record list structure
      list_header_p = list_structure.create();
      list_structure.setReservedLong(-1);
      first_delete_chain_record = -1;

      // Create the index store
      index_store = new IndexSetStore(store, getSystem());
      index_header_p = index_store.create();

      // Write the header area
      header_writer.putInt(1);                     // version
      header_writer.putInt(table_id);              // table id
      header_writer.putLong(sequence_id);          // initial sequence id
      header_writer.putLong(data_table_def_p);     // pointer to DataTableDef
      header_writer.putLong(data_index_set_def_p); // pointer to DataIndexSetDef
      header_writer.putLong(index_header_p);       // pointer to index header
      header_writer.putLong(list_header_p);        // pointer to list header
      header_writer.finish();

      // Write the DataTableDef
      data_table_def_writer.put(data_table_def_buf);
      data_table_def_writer.finish();

      // Write the DataIndexSetDef
      data_index_set_writer.put(index_set_def_buf);
      data_index_set_writer.finish();

      // Set the pointer to the header in the fixed area.
      MutableArea fixed_area = store.getMutableArea(-1);
      fixed_area.putLong(header_p);
      fixed_area.checkOut();

      // Set the header area
      header_area = store.getMutableArea(header_p);

    }
    finally {
      store.unlockForWrite();
    }

  }
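  // The table header area written by 'setupInitialStore' and read back by
  // 'readStoreHeaders' is laid out as follows (the area is created with 80
  // bytes, of which the first 48 are currently used):
  //
  //   int  - version (1)
  //   int  - table_id
  //   long - sequence_id
  //   long - pointer to the serialized DataTableDef
  //   long - pointer to the serialized DataIndexSetDef
  //   long - pointer to the index header (IndexSetStore)
  //   long - pointer to the list header (FixedRecordList)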
  /**
   * Reads the table header and definition areas from the store and sets up
   * the internal state of this object from them.
   */
  private void readStoreHeaders() throws IOException {
    // Read the fixed header
    Area fixed_area = store.getArea(-1);
    // Set the header area
    header_area = store.getMutableArea(fixed_area.getLong());

    // Read the header fields
    int version = header_area.getInt();            // version
    if (version != 1) {
      throw new IOException("Incorrect version identifier.");
    }
    this.table_id = header_area.getInt();          // table_id
    this.sequence_id = header_area.getLong();      // sequence id
    long def_p = header_area.getLong();            // pointer to DataTableDef
    long index_def_p = header_area.getLong();      // pointer to DataIndexSetDef
    this.index_header_p = header_area.getLong();   // pointer to index header
    this.list_header_p = header_area.getLong();    // pointer to list header

    // Read the DataTableDef
    DataInputStream din = getDIn(store.getAreaInputStream(def_p));
    version = din.readInt();
    if (version != 1) {
      throw new IOException("Incorrect DataTableDef version identifier.");
    }
    table_def = DataTableDef.read(din);
    din.close();

    // Read the DataIndexSetDef
    din = getDIn(store.getAreaInputStream(index_def_p));
    version = din.readInt();
    if (version != 1) {
      throw new IOException("Incorrect DataIndexSetDef version identifier.");
    }
    index_def = DataIndexSetDef.read(din);
    din.close();

    // Initialize the record list structure and the delete chain head
    list_structure.init(list_header_p);
    first_delete_chain_record = list_structure.getReservedLong();

    // Initialize the index store
    index_store = new IndexSetStore(store, getSystem());
    try {
      index_store.init(index_header_p);
    }
    catch (IOException e) {
      // If the index store can not be initialized, create a fresh one and
      // point the header at it.
      index_store = new IndexSetStore(store, getSystem());
      index_header_p = index_store.create();
      index_store.addIndexLists(table_def.columnCount() + 1, (byte) 1, 1024);
      header_area.position(32);
      header_area.putLong(index_header_p);
      header_area.position(0);
      header_area.checkOut();
    }

  }

  /**
   * Creates the store for this table and initializes it to an empty state.
   */
  void create(int table_id, DataTableDef table_def) throws IOException {

    // Set the data table def object
    setupDataTableDef(table_def);

    // Initially set the table sequence_id to 1
    this.sequence_id = 1;

    // Generate the name of the store file.
    this.file_name = makeTableFileName(getSystem(), table_id, getTableName());

    // Create and open the store.
    store = storeSystem().createStore(file_name);

    try {
      store.lockForWrite();

      // Setup the list structure
      list_structure = new FixedRecordList(store, 12);
    }
    finally {
      store.unlockForWrite();
    }

    // Set up internal state of this object
    this.table_id = table_id;

    // Initialize the store to an empty state and create the index lists
    setupInitialStore();
    index_store.addIndexLists(table_def.columnCount() + 1, (byte) 1, 1024);

    // Load internal state
    loadInternal();

  }

  /**
   * Returns true if the store with the given identity exists.
   */
  boolean exists(String identity) throws IOException {
    return storeSystem().storeExists(identity);
  }

  /**
   * Opens an existing store with the given file name and sets up the
   * internal state of this object from it.
   */
  public void open(String file_name) throws IOException {

    // Set the file name
    this.file_name = file_name;

    // Open the store.
    store = storeSystem().openStore(file_name);
    boolean need_check = !store.lastCloseClean();

    // Setup the list structure
    list_structure = new FixedRecordList(store, 12);

    // Read and setup the pointers
    readStoreHeaders();

    // Set the column count
    column_count = table_def.columnCount();

    // Open table indices
    table_indices = new MultiVersionTableIndices(getSystem(),
                          table_def.getTableName(), table_def.columnCount());
    // The column rid list cache
    column_rid_list = new RIDList[table_def.columnCount()];

    // Load internal state
    loadInternal();

    if (need_check) {
      // The store was not closed cleanly, so do an opening scan of the
      // table and check the file for leaked areas.
      doOpeningScan();

      Debug().write(Lvl.INFORMATION, this,
                    "Scanning File: " + file_name + " for leaks.");
      scanForLeaks();
    }

  }

  /**
   * Closes this table.  If 'pending_drop' is true, all blob references held
   * by records in the table are released before the store is closed.
   */
  synchronized void close(boolean pending_drop) throws IOException {
    synchronized (list_structure) {

      // Release all locks on the table indices
      clearAllRootLocks();

      try {
        try {
          store.lockForWrite();

          // Force a garbage collection event before we close
          if (!isReadOnly()) {
            garbage_collector.performCollectionEvent(true);
          }

          // If we are dropping the table, release all blob references held
          // by records in the table.
          if (pending_drop) {
            dropAllBlobReferences();
          }
        }
        finally {
          store.unlockForWrite();
        }
      }
      catch (Throwable e) {
        Debug().write(Lvl.ERROR, this,
                      "Exception during table (" + toString() + ") close: " +
                      e.getMessage());
        Debug().writeException(e);
      }

      // Close the index store and the backing store.
      index_store.close();
      storeSystem().closeStore(store);

      table_def = null;
      table_indices = null;
      column_rid_list = null;
      is_closed = true;
    }
  }
  /**
   * Copies into this table all the records of 'src_master_table' that are
   * visible in the given IndexSet, then copies or rebuilds the index data.
   */
  void copy(int table_id, MasterTableDataSource src_master_table,
            IndexSet index_set) throws IOException {

    // Create this table with the source table's definition
    create(table_id, src_master_table.getDataTableDef());

    // The master index of the table to copy from
    IntegerListInterface master_index = index_set.getIndex(0);

    // Copy all records that are in the master index
    int sz = src_master_table.rawRowCount();
    for (int i = 0; i < sz; ++i) {
      if (master_index.contains(i)) {
        copyRecordFrom(src_master_table, i);
      }
    }

    // Copy the index data if the source is also a V2 table, otherwise
    // rebuild the indexes from scratch.
    if (src_master_table instanceof V2MasterTableDataSource) {
      index_store.copyAllFrom(index_set);
    }
    else if (src_master_table instanceof V1MasterTableDataSource) {
      buildIndexes();
    }

    // Finally copy the unique id sequence
    long un_id = src_master_table.nextUniqueID();
    setUniqueID(un_id);

  }

  /**
   * Writes the given RowData out to a newly allocated area in the store and
   * returns a pointer to the area.
   */
  private long writeRecordToStore(RowData data) throws IOException {

    // Calculate how much space this record will use
    int row_cells = data.getColumnCount();

    int[] cell_sizes = new int[row_cells];
    int[] cell_type = new int[row_cells];

    try {
      store.lockForWrite();

      // Work out the size and type of each cell, and establish a reference
      // to any large objects in the record.
      int all_records_size = 0;
      for (int i = 0; i < row_cells; ++i) {
        TObject cell = data.getCellData(i);
        int sz;
        int ctype;
        if (cell.getObject() instanceof Ref) {
          Ref large_object_ref = (Ref) cell.getObject();
          // A large object is stored as a 16 byte reference.
          sz = 16;
          ctype = 2;
          if (large_object_ref != null) {
            // Tell the blob store that this table holds a reference to the
            // large object.
            blob_store_interface.establishReference(large_object_ref.getID());
          }
        }
        else {
          sz = ObjectTransfer.exactSize(cell.getObject());
          ctype = 1;
        }
        cell_sizes[i] = sz;
        cell_type[i] = ctype;
        all_records_size += sz;
      }

      long record_p;

      // Allocate space for the record,
      AreaWriter writer =
                    store.createArea(all_records_size + (row_cells * 8) + 4);
      record_p = writer.getID();

      // The record output stream
      DataOutputStream dout = getDOut(writer.getOutputStream());

      // Write the record header first,
      dout.writeInt(0);                 // reserved
      int cell_skip = 0;
      for (int i = 0; i < row_cells; ++i) {
        dout.writeInt((int) cell_type[i]);
        dout.writeInt(cell_skip);
        cell_skip += cell_sizes[i];
      }

      // Now write a serialization of the cells themselves,
      for (int i = 0; i < row_cells; ++i) {
        TObject t_object = data.getCellData(i);
        int ctype = cell_type[i];
        if (ctype == 1) {
          // Regular object
          ObjectTransfer.writeTo(dout, t_object.getObject());
        }
        else if (ctype == 2) {
          // This is a reference to a large object in the blob store.
          Ref large_object_ref = (Ref) t_object.getObject();
          if (large_object_ref == null) {
            // Null value
            dout.writeInt(1);
            dout.writeInt(0);           // reserved
            dout.writeLong(-1);
          }
          else {
            dout.writeInt(0);
            dout.writeInt(0);           // reserved
            dout.writeLong(large_object_ref.getID());
          }
        }
        else {
          throw new IOException("Unrecognised cell type.");
        }
      }

      // Flush the output
      dout.flush();

      // Finish the record
      writer.finish();

      // Return the pointer to the record
      return record_p;

    }
    finally {
      store.unlockForWrite();
    }

  }

  /**
   * Copies the record at the given index in the source table to the same
   * record index in this table.
   */
  private void copyRecordFrom(MasterTableDataSource src_master_table,
                              int record_id) throws IOException {

    // Copy the record from the source table into a RowData object,
    int sz = src_master_table.getDataTableDef().columnCount();
    RowData row_data = new RowData(getSystem(), sz);
    for (int i = 0; i < sz; ++i) {
      TObject tob = src_master_table.getCellContents(i, record_id);
      row_data.setColumnDataFromTObject(i, tob);
    }

    try {
      store.lockForWrite();

      // Write the record to the store
      long record_p = writeRecordToStore(row_data);

      // Add this record into the table structure at the given index
      addToRecordList(record_id, record_p);

      // Set the record state
      writeRecordType(record_id, 0x010);

    }
    finally {
      store.unlockForWrite();
    }

  }
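  // A record area, as written by 'writeRecordToStore', is laid out as;
  //
  //   int          - reserved
  //   (per column) - int: cell type (1 = serialized object, 2 = large
  //                  object reference), int: offset of the cell data from
  //                  the start of the data section
  //   (cell data)  - the serialized cells; a type 2 entry is 16 bytes
  //                  (int: blob type, int: reserved, long: reference id).
  //
  // 'removeAllBlobReferencesForRecord' below walks this layout and releases
  // any large object references before a record area is freed.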
  /**
   * Releases all large object references held by the record stored at the
   * given pointer.
   */
  private void removeAllBlobReferencesForRecord(long record_p)
                                                         throws IOException {
    Area record_area = store.getArea(record_p);
    int reserved = record_area.getInt();
    // Look at each cell in the record
    for (int i = 0; i < column_count; ++i) {
      int ctype = record_area.getInt();
      int cell_offset = record_area.getInt();
      if (ctype == 1) {
        // Not a large object so nothing to release.
      }
      else if (ctype == 2) {
        // Large object reference, so release it.
        int cur_p = record_area.position();
        record_area.position(cell_offset + 4 + (column_count * 8));
        int btype = record_area.getInt();
        record_area.getInt();    // (reserved)
        if (btype == 0) {
          long blob_ref_id = record_area.getLong();
          blob_store_interface.releaseReference(blob_ref_id);
        }
        // Revert the position pointer.
        record_area.position(cur_p);
      }
      else {
        throw new RuntimeException("Unrecognised type.");
      }
    }
  }

  /**
   * Scans the table and releases every large object reference held by
   * records that are not deleted.  Called when the table is being dropped.
   */
  private void dropAllBlobReferences() throws IOException {

    synchronized (list_structure) {
      long elements = list_structure.addressableNodeCount();
      for (long i = 0; i < elements; ++i) {
        Area a = list_structure.positionOnNode(i);
        int status = a.getInt();
        // Only process records that are not deleted
        if ((status & 0x020000) == 0) {
          long record_p = a.getLong();
          removeAllBlobReferencesForRecord(record_p);
        }
      }
    }

  }

  /**
   * Scans the store for areas that are no longer referenced by the table and
   * frees them.
   */
  public void scanForLeaks() throws IOException {

    synchronized (list_structure) {

      // The list of pointers to areas in use (as Long).
      ArrayList used_areas = new ArrayList();

      // Add the header area
      used_areas.add(new Long(header_area.getID()));

      // Add the DataTableDef and DataIndexSetDef areas
      header_area.position(16);
      used_areas.add(new Long(header_area.getLong()));
      used_areas.add(new Long(header_area.getLong()));

      // Add all the areas used by the list structure
      list_structure.addAllAreasUsed(used_areas);

      // Add all the areas used by the index store
      index_store.addAllAreasUsed(used_areas);

      // Add the record areas of all records that are not deleted
      long elements = list_structure.addressableNodeCount();
      for (long i = 0; i < elements; ++i) {
        Area a = list_structure.positionOnNode(i);
        int status = a.getInt();
        if ((status & 0x020000) == 0) {
          long pointer = a.getLong();
          used_areas.add(new Long(pointer));
        }
      }

      // Any allocated area not in the used list is a leak, so delete it.
      if (store instanceof AbstractStore) {
        AbstractStore a_store = (AbstractStore) store;
        ArrayList leaked_areas = a_store.findAllocatedAreasNotIn(used_areas);
        if (leaked_areas.size() == 0) {
          Debug().write(Lvl.INFORMATION, this, "No leaked areas.");
        }
        else {
          Debug().write(Lvl.INFORMATION, this, "There were " +
                        leaked_areas.size() + " leaked areas found.");
          for (int n = 0; n < leaked_areas.size(); ++n) {
            Long area_pointer = (Long) leaked_areas.get(n);
            store.deleteArea(area_pointer.longValue());
          }
          Debug().write(Lvl.INFORMATION, this,
                        "Leaked areas successfully freed.");
        }
      }

    }

  }

  /**
   * Performs a check and repair of the table store, reporting progress to
   * the given UserTerminal.
   */
  public void checkAndRepair(String file_name,
                             UserTerminal terminal) throws IOException {

    this.file_name = file_name;

    terminal.println("+ Repairing V2MasterTableDataSource " + file_name);

    store = storeSystem().openStore(file_name);
    // If the store is an AbstractStore then scan and fix it first
    if (store instanceof AbstractStore) {
      ((AbstractStore) store).openScanAndFix(terminal);
    }

    // Setup the list structure
    list_structure = new FixedRecordList(store, 12);

    try {
      // Read and setup the pointers
      readStoreHeaders();
      // Set the column count
      column_count = table_def.columnCount();
    }
    catch (IOException e) {
      // If this fails, the table is not recoverable.
      terminal.println(
           "! Table is not repairable because the file headers are corrupt.");
      terminal.println(" Error reported: " + e.getMessage());
      e.printStackTrace();
      return;
    }

    terminal.println("- Checking record integrity.");

    // The sorted list of all areas in the file.
    List all_areas = store.getAllAreas();
    // The list of all valid records found during the check
    ArrayList all_records = new ArrayList();

    // Check each record and rebuild the delete chain as we go.
    first_delete_chain_record = -1;
    int record_count = 0;
    int free_count = 0;
    int sz = rawRowCount();
    for (int i = sz - 1; i >= 0; --i) {
      boolean record_valid = checkAndRepairRecord(i, all_areas, terminal);
      if (record_valid) {
        all_records.add(new Long(i));
        ++record_count;
      }
      else {
        ++free_count;
      }
    }
    // Persist the head of the rebuilt delete chain
    list_structure.setReservedLong(first_delete_chain_record);

    terminal.print("* Record count = " + record_count);
    terminal.println(" Free count = " + free_count);

    // Drop and rebuild all the table indexes.
    terminal.println("- Rebuilding all table index information.");

    int index_count = table_def.columnCount() + 1;
    for (int i = 0; i < index_count; ++i) {
      index_store.commitDropIndex(i);
    }
    buildIndexes();

    terminal.println("- Table check complete.");

  }
  private boolean checkAndRepairRecord(
                int row_index, List all_areas, UserTerminal terminal)
                                                         throws IOException {
    synchronized (list_structure) {
      MutableArea block_area = list_structure.positionOnNode(row_index);
      int p = block_area.position();
      int status = block_area.getInt();
      if ((status & 0x020000) == 0) {
        long record_p = block_area.getLong();
        int i = Collections.binarySearch(all_areas, new Long(record_p));
        if (i >= 0) {
          try {
            internalGetCellContents(0, row_index);
            return true;
          }
          catch (Throwable e) {
            terminal.println("+ Error accessing record: " + e.getMessage());
          }

        }

        terminal.println("+ Record area not valid: row = " + row_index +
                         " pointer = " + record_p);
        terminal.println("+ Deleting record.");
      }
      block_area.position(p);
      block_area.putInt(0x020000);
      block_area.putLong(first_delete_chain_record);
      block_area.checkOut();
      first_delete_chain_record = row_index;

      return false;

    }

  }
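  // Free rows are chained together through the fixed record list: a node
  // whose status int has the 0x020000 bit set is deleted, and the long that
  // follows it points to the next deleted node (-1 terminates the chain).
  // 'first_delete_chain_record' is the head of this chain and is persisted
  // in the reserved long of the list structure.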
  private void growListStructure() throws IOException {
    try {
      store.lockForWrite();

      list_structure.increaseSize();
      int new_block_number = list_structure.listBlockCount() - 1;
      long start_index =
                      list_structure.listBlockFirstPosition(new_block_number);
      long size_of_block =
                         list_structure.listBlockNodeCount(new_block_number);

      MutableArea a = list_structure.positionOnNode(start_index);

      for (long n = 0; n < size_of_block - 1; ++n) {
        a.putInt(0x020000);
        a.putLong(start_index + n + 1);
      }
      a.putInt(0x020000);
      a.putLong(first_delete_chain_record);
      a.checkOut();
      first_delete_chain_record = start_index;
      list_structure.setReservedLong(first_delete_chain_record);

    }
    finally {
      store.unlockForWrite();
    }

  }

  private long addToRecordList(long index, long record_p) throws IOException {
    synchronized (list_structure) {
      if (has_shutdown) {
        throw new IOException("IO operation while VM shutting down.");
      }

      long addr_count = list_structure.addressableNodeCount();
      while (index >= addr_count) {
        growListStructure();
        addr_count = list_structure.addressableNodeCount();
      }

      long prev = -1;
      long chain = first_delete_chain_record;
      while (chain != -1 && chain != index) {
        Area a = list_structure.positionOnNode(chain);
        if (a.getInt() == 0x020000) {
          prev = chain;
          chain = a.getLong();
        }
        else {
          throw new IOException("Not deleted record is in delete chain!");
        }
      }
      if (chain == -1) {
        throw new IOException(
                      "Unable to add record because index is not available.");
      }
      Area a = list_structure.positionOnNode(chain);
      if (a.getInt() != 0x020000) {
        throw new IOException("Not deleted record is in delete chain!");
      }
      long next_p = a.getLong();

      try {
        store.lockForWrite();

        if (prev == -1) {
          first_delete_chain_record = next_p;
          list_structure.setReservedLong(first_delete_chain_record);
        }
        else {
          MutableArea ma = list_structure.positionOnNode(prev);
          ma.putInt(0x020000);
          ma.putLong(next_p);
          ma.checkOut();
        }

        MutableArea ma = list_structure.positionOnNode(index);
        ma.putInt(0);
        ma.putLong(record_p);
        ma.checkOut();

      }
      finally {
        store.unlockForWrite();
      }

    }

    return index;
  }

  private long addToRecordList(long record_p) throws IOException {
    synchronized (list_structure) {
      if (has_shutdown) {
        throw new IOException("IO operation while VM shutting down.");
      }

      if (first_delete_chain_record == -1) {
        growListStructure();
      }

      long recycled_record = first_delete_chain_record;
      MutableArea block = list_structure.positionOnNode(recycled_record);
      int rec_pos = block.position();
      int status = block.getInt();
      if ((status & 0x020000) == 0) {
        throw new Error("Assertion failed: record is not deleted. " +
                        "status = " + status + ", rec_pos = " + rec_pos);
      }
      long next_chain = block.getLong();
      first_delete_chain_record = next_chain;

      try {

        store.lockForWrite();

        list_structure.setReservedLong(first_delete_chain_record);
        block.position(rec_pos);
        block.putInt(0);
        block.putLong(record_p);
        block.checkOut();

      }
      finally {
        store.unlockForWrite();
      }

      return recycled_record;

    }

  }


  String getSourceIdent() {
    return file_name;
  }


  int writeRecordType(int row_index, int row_state) throws IOException {
    synchronized (list_structure) {
      if (has_shutdown) {
        throw new IOException("IO operation while VM shutting down.");
      }

      MutableArea block_area = list_structure.positionOnNode(row_index);
      int pos = block_area.position();
      int old_status = block_area.getInt();
      int mod_status = (old_status & 0x0FFFF0000) | (row_state & 0x0FFFF);

      try {

        store.lockForWrite();

        block_area.position(pos);
        block_area.putInt(mod_status);
        block_area.checkOut();

      }
      finally {
        store.unlockForWrite();
      }

      return old_status & 0x0FFFF;
    }
  }


  int readRecordType(int row_index) throws IOException {
    synchronized (list_structure) {
      Area block_area = list_structure.positionOnNode(row_index);
      return block_area.getInt() & 0x0FFFF;
    }
  }


  boolean recordDeleted(int row_index) throws IOException {
    synchronized (list_structure) {
      Area block_area = list_structure.positionOnNode(row_index);
      return (block_area.getInt() & 0x020000) != 0;
    }
  }


  int rawRowCount() throws IOException {
    synchronized (list_structure) {
      long total = list_structure.addressableNodeCount();
      return (int) total;
    }
  }
  /**
   * Deletes the row at the given index: the row is added to the delete
   * chain, any large object references it holds are released, and the record
   * area is freed in the store.
   */
  void internalDeleteRow(int row_index) throws IOException {
    long record_p;
    synchronized (list_structure) {
      if (has_shutdown) {
        throw new IOException("IO operation while VM shutting down.");
      }

      MutableArea block_area = list_structure.positionOnNode(row_index);
      int p = block_area.position();
      int status = block_area.getInt();
      // Check the record is not already deleted
      if ((status & 0x020000) != 0) {
        throw new IOException("Record is already marked as deleted.");
      }
      record_p = block_area.getLong();

      try {
        store.lockForWrite();

        // Add the row to the head of the delete chain
        block_area.position(p);
        block_area.putInt(0x020000);
        block_area.putLong(first_delete_chain_record);
        block_area.checkOut();
        first_delete_chain_record = row_index;
        list_structure.setReservedLong(first_delete_chain_record);

        // Release any large object references held by this record
        removeAllBlobReferencesForRecord(record_p);

        // Free the record area in the store
        store.deleteArea(record_p);

      }
      finally {
        store.unlockForWrite();
      }

    }

  }


  IndexSet createIndexSet() {
    return index_store.getSnapshotIndexSet();
  }


  void commitIndexSet(IndexSet index_set) {
    index_store.commitIndexSet(index_set);
    index_set.dispose();
  }

  /**
   * Writes the given row to the store, adds it to the record list and
   * updates the cell cache.  Returns the row number of the new record.
   */
  int internalAddRow(RowData data) throws IOException {

    long row_number;
    int int_row_number;

    synchronized (list_structure) {
      // Write the record to the store and add it to the record list
      long record_p = writeRecordToStore(data);
      row_number = addToRecordList(record_p);
      int_row_number = (int) row_number;
    }

    // Update the cell cache as appropriate
    if (DATA_CELL_CACHING) {
      int row_cells = data.getColumnCount();
      for (int i = 0; i < row_cells; ++i) {
        cache.put(table_id, int_row_number, i, data.getCellData(i));
      }
    }

    return (int) row_number;

  }


  synchronized void checkForCleanup() {
    garbage_collector.performCollectionEvent(false);
  }


  /**
   * Skips 'amount' bytes from the given InputStream.
   */
  private void skipStream(InputStream in, final long amount)
                                                         throws IOException {
    long count = amount;
    long skipped = 0;
    while (skipped < amount) {
      long last_skipped = in.skip(count);
      skipped += last_skipped;
      count -= last_skipped;
    }
  }


  /**
   * Running count of file hits, flushed to the stats object periodically.
   */
  private short s_run_file_hits = Short.MAX_VALUE;

  /**
   * Reads the cell at the given column/row from the store (or from the cell
   * cache if it is present there).
   */
  TObject internalGetCellContents(int column, int row) {

    // Check the cache and return the cell if it is in the cache.
    TObject cell;
    if (DATA_CELL_CACHING) {
      cell = cache.get(table_id, row, column);
      if (cell != null) {
        return cell;
      }
    }

    long record_p = -1;
    try {
      synchronized (list_structure) {

        // Increment the file hits counter and flush it to the stats object
        // every 100 hits.
        ++s_run_file_hits;
        if (s_run_file_hits >= 100) {
          getSystem().stats().add(s_run_file_hits, file_hits_key);
          s_run_file_hits = 0;
        }

        // Get the node for the record
        Area list_block = list_structure.positionOnNode(row);
        int status = list_block.getInt();
        // Check the record is not deleted
        if ((status & 0x020000) != 0) {
          throw new Error("Unable to read deleted record.");
        }
        // Get the pointer to the record we are reading
        record_p = list_block.getLong();

      }

      // Open a stream to the record
      DataInputStream din = getDIn(store.getAreaInputStream(record_p));

      // Skip to the cell header for the column and read the type and offset
      skipStream(din, 4 + (column * 8));
      int cell_type = din.readInt();
      int cell_offset = din.readInt();

      // Skip to the start of the cell data
      int cur_at = 8 + 4 + (column * 8);
      int be_at = 4 + (column_count * 8);
      int skip_amount = (be_at - cur_at) + cell_offset;

      skipStream(din, skip_amount);

      Object ob;
      if (cell_type == 1) {
        // Standard serialized object
        ob = ObjectTransfer.readFrom(din);
      }
      else if (cell_type == 2) {
        // A reference to a large object in the blob store
        int f_type = din.readInt();
        int f_reserved = din.readInt();
        long ref_id = din.readLong();
        if (f_type == 0) {
          // Resolve the reference
          ob = blob_store_interface.getLargeObject(ref_id);
        }
        else if (f_type == 1) {
          ob = null;
        }
        else {
          throw new RuntimeException("Unknown blob type.");
        }
      }
      else {
        throw new RuntimeException("Unrecognised cell type in data.");
      }

      // Wrap it in a TObject with the correct column type
      TType ttype = getDataTableDef().columnAt(column).getTType();
      cell = new TObject(ttype, ob);

      // Close the reader
      din.close();

    }
    catch (IOException e) {
      Debug().writeException(e);
      throw new RuntimeException("IOError getting cell at (" + column + ", " +
                                 row + ") pointer = " + record_p + ".");
    }

    // Put the cell in the cache and return it
    if (DATA_CELL_CACHING) {
      cache.put(table_id, row, column, cell);
    }

    return cell;

  }
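  // The unique id sequence for this table is held in 'sequence_id' and is
  // persisted at offset 8 of the header area (immediately after the version
  // and table_id ints) whenever it changes.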
  long currentUniqueID() {
    synchronized (list_structure) {
      return sequence_id - 1;
    }
  }


  long nextUniqueID() {
    synchronized (list_structure) {
      long v = sequence_id;
      ++sequence_id;
      if (has_shutdown) {
        throw new RuntimeException("IO operation while VM shutting down.");
      }
      try {
        try {
          store.lockForWrite();
          header_area.position(4 + 4);
          header_area.putLong(sequence_id);
          header_area.checkOut();
        }
        finally {
          store.unlockForWrite();
        }
      }
      catch (IOException e) {
        Debug().writeException(e);
        throw new Error("IO Error: " + e.getMessage());
      }
      return v;
    }
  }


  void setUniqueID(long value) {
    synchronized (list_structure) {
      sequence_id = value;
      if (has_shutdown) {
        throw new RuntimeException("IO operation while VM shutting down.");
      }
      try {
        try {
          store.lockForWrite();
          header_area.position(4 + 4);
          header_area.putLong(sequence_id);
          header_area.checkOut();
        }
        finally {
          store.unlockForWrite();
        }
      }
      catch (IOException e) {
        Debug().writeException(e);
        throw new Error("IO Error: " + e.getMessage());
      }
    }
  }

  synchronized void dispose(boolean pending_drop) throws IOException {
    synchronized (list_structure) {
      if (!is_closed) {
        close(pending_drop);
      }
    }
  }

  synchronized boolean drop() throws IOException {
    synchronized (list_structure) {

      if (!is_closed) {
        close(true);
      }

      boolean b = storeSystem().deleteStore(store);
      if (b) {
        Debug().write(Lvl.MESSAGE, this, "Dropped: " + getSourceIdent());
      }
      return b;

    }
  }

  void shutdownHookCleanup() {
    synchronized (list_structure) {
      index_store.close();
      has_shutdown = true;
    }
  }

  boolean isWorthCompacting() {
    return true;
  }


  public String toString() {
    return "[V2MasterTableDataSource: " + file_name + "]";
  }

}