package org.ozoneDB.core.storage.magicStore;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Set;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

import org.ozoneDB.Setup;
import org.ozoneDB.DxLib.DxHashMap;
import org.ozoneDB.DxLib.DxHashSet;
import org.ozoneDB.DxLib.DxIterator;
import org.ozoneDB.DxLib.DxMap;
import org.ozoneDB.core.Env;
import org.ozoneDB.core.Lock;
import org.ozoneDB.core.MROWLock;
import org.ozoneDB.core.Permissions;
import org.ozoneDB.core.Transaction;
import org.ozoneDB.core.TransactionID;
import org.ozoneDB.core.storage.AbstractClusterStore;
import org.ozoneDB.core.storage.Cache;
import org.ozoneDB.core.storage.Cluster;
import org.ozoneDB.core.storage.ClusterID;
import org.ozoneDB.core.storage.SoftReferenceCache;
import org.ozoneDB.core.storage.StorageObjectContainer;
import org.ozoneDB.io.stream.ResolvingObjectInputStream;
import org.ozoneDB.util.LogWriter;


/**
 * Cluster store of the "magic" storage back-end.
 *
 * Containers are grouped into clusters; each cluster is serialized to one
 * file per {@link ClusterID}. Commit safety is achieved with a classic
 * rename dance over three file suffixes:
 * <ul>
 *   <li>{@code <cid>.cluster} — the current committed state,</li>
 *   <li>{@code <cid>.<taID>.new} — the prepared (not yet committed) state,</li>
 *   <li>{@code <cid>.<taID>.old} — the previous state kept until commit completes.</li>
 * </ul>
 * Write locks are additionally persisted in {@code <cid>.lock} files so that
 * recovery can tell which transaction owned a cluster.
 */
public final class ClusterStore extends AbstractClusterStore {

    /** Suffix of a cluster file written by prepareCommit but not yet committed. */
    final static String POSTFIX_NEW = POSTFIX_SEPARATOR + "new";

    /** Suffix of the previous cluster state, kept until the commit is complete. */
    private final static String POSTFIX_OLD = POSTFIX_SEPARATOR + "old";

    /**
     * Rough GZIP compression ratio used to estimate the in-memory size of a
     * cluster from its on-disk (compressed) file length.
     */
    protected final static int compressionFactor = 3;

    /** In-memory cluster cache; entries may be reclaimed by the GC. */
    private transient Cache clusterCache;

    /** Soft upper bound (bytes) before a cluster stops accepting new containers. */
    protected int maxClusterSize = 64 * 1024;

    /**
     * Maps {@link Permissions} to the ClusterID of the cluster that currently
     * receives newly registered containers for that permission set.
     */
    protected DxMap growingClusterIDs;

    /** If true, cluster files are GZIP-compressed on disk. */
    private boolean compressClusters;

    private MagicStore magicStore;


    ClusterStore(Env _env) {
        super(_env);

        // NOTE(review): default of -1 overrides the field initializer (64K) when
        // the property is absent -- presumably the property is always configured;
        // verify, otherwise maxClusterSize comparisons degenerate.
        maxClusterSize = env.config.intProperty(Setup.WS_CLUSTER_SIZE, -1);

        clusterCache = new SoftReferenceCache();

        compressClusters = env.config.booleanProperty(Setup.WS_COMPRESS_CLUSTERS, true);
    }


    MagicStore getMagicStore() {
        return magicStore;
    }

    void setMagicStore(MagicStore _magicStore) {
        this.magicStore = _magicStore;
    }

    public void startup() throws Exception {
        growingClusterIDs = new DxHashMap(32);
    }


    public void shutdown() {
    }


    /**
     * A shutdown was clean iff no transient {@code .new}/{@code .old} files
     * survived in the data directory.
     */
    public boolean isCleanShutdown() {
        File file = new File(env.getDatabaseDir() + Env.DATA_DIR);
        String[] fileList = file.list();

        for (int i = 0; i < fileList.length; i++) {
            if (fileList[i].endsWith(POSTFIX_NEW) || fileList[i].endsWith(POSTFIX_OLD)) {
                return false;
            }
        }
        return true;
    }


    /**
     * Scans the data directory and returns the {@link ClusterID}s of all
     * clusters present in any state (committed, new or old).
     */
    public Set recoverClusterIDs() {
        File file = new File(env.getDatabaseDir() + Env.DATA_DIR);
        String[] fileList = file.list();

        Set result = new HashSet();
        for (int i = 0; i < fileList.length; i++) {
            if (fileList[i].endsWith(POSTFIX_CLUSTER) || fileList[i].endsWith(POSTFIX_NEW)
                    || fileList[i].endsWith(POSTFIX_OLD)) {
                String cidString = fileList[i].substring(0, fileList[i].indexOf(POSTFIX_SEPARATOR));
                long cid = Long.parseLong(cidString);
                result.add(new ClusterID(cid));
            }
        }
        return result;
    }

    /**
     * Returns the {@link TransactionID}s of transactions that left behind
     * {@code .new} files, i.e. prepared but never finished committing.
     */
    Set uncommittedTaIDs() {
        File file = new File(env.getDatabaseDir() + Env.DATA_DIR);
        File[] fileList = file.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String name) {
                return name.endsWith(POSTFIX_NEW);
            }
        });
        Set result = new HashSet();
        for (int i = 0; i < fileList.length; i++) {
            // File name layout: <cid><sep><taID><sep>new -> taID is the
            // second-to-last part.
            String[] parts = splitClusterName(fileList[i].getName());
            long taID = Long.parseLong(parts[parts.length - 2]);
            result.add(new TransactionID(taID));
        }
        return result;
    }


    /** Splits a cluster file name at the (regex-escaped) postfix separator. */
    private static String[] splitClusterName(String clusterName) {
        return clusterName.split("\\" + POSTFIX_SEPARATOR);
    }

    public long currentCacheSize() {
        return clusterCache.size();
    }


    public int currentBytesPerContainer() {
        int result = env.config.intProperty(Setup.WS_CLUSTER_SIZE_RATIO, 256);
        return result;
    }

    /**
     * Returns the cluster that currently accepts new containers for the given
     * permissions. Tries, in order: the registered growing cluster, any cached
     * cluster with matching permissions and spare room, and finally a freshly
     * created cluster. A candidate is rejected when it has been deactivated
     * (null lock), is full, or is locked by a different transaction.
     */
    protected synchronized Cluster growingCluster(Permissions perms, MagicTransaction ta) throws Exception {
        if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this, "growingCluster() ", LogWriter.DEBUG3);
        }

        Cluster cluster = null;
        ClusterID cid = (ClusterID) growingClusterIDs.elementForKey(perms);

        if (cid != null) {
            cluster = (Cluster) clusterCache.get(cid);
            if (cluster == null) {
                cluster = loadCluster(cid, ta);
            }
            // Parenthesized for clarity; '&&' binds tighter than '||' so this is
            // identical to the unparenthesized form.
            if (cluster.lock() == null || cluster.size() >= maxClusterSize
                    || (cluster.lock().level(null) > Lock.LEVEL_NONE
                        && !cluster.lock().isAcquiredBy(env.transactionManager.currentTA()))) {

                if (env.logWriter.hasTarget(LogWriter.DEBUG1)) {
                    env.logWriter.newEntry(this,
                            "growingCluster(): growing cluster not usable: cid=" + cluster.clusterID()
                            + " size=" + cluster.size() + " lockLevel="
                            + (cluster.lock() != null ? String.valueOf(cluster.lock().level(null)) : "null"),
                            LogWriter.DEBUG1);
                }

                growingClusterIDs.removeForKey(perms);
                cluster = null;
            }
        }

        if (cluster == null) {
            // Search the cache for any usable cluster with matching permissions.
            for (Iterator i = clusterCache.copyToMap().values().iterator(); i.hasNext(); ) {
                Cluster cursor = (Cluster) i.next();

                if (cursor.size() < maxClusterSize && cursor.permissions().equals(perms)) {
                    cluster = cursor;

                    if (cluster.lock() == null) {
                        env.logWriter.newEntry(this,
                                "growingCluster(): loaded cluster was deactivated: " + cluster.clusterID(),
                                LogWriter.DEBUG);
                        cluster = null;
                    } else if (cluster.lock().level(null) > Lock.LEVEL_NONE && !cluster.lock().isAcquiredBy(
                            env.transactionManager.currentTA())) {
                        if (env.logWriter.hasTarget(LogWriter.DEBUG1)) {
                            env.logWriter.newEntry(this,
                                    "growingCluster(): loaded cluster is locked by another transaction: "
                                    + cluster.clusterID(), LogWriter.DEBUG1);
                        }
                        cluster = null;
                    } else {
                        growingClusterIDs.addForKey(cluster.clusterID(), perms);
                        if (env.logWriter.hasTarget(LogWriter.DEBUG1)) {
                            env.logWriter.newEntry(this,
                                    "growingCluster(): loaded cluster is now growing cluster: " + cluster.clusterID()
                                    + " size:" + cluster.size(), LogWriter.DEBUG1);
                        }
                        break;
                    }
                }
            }
        }

        if (cluster == null) {
            cluster = createANewEmptyAndUsableCluster(perms);
        }

        return cluster;
    }

    /**
     * Creates, activates, caches and registers (as growing cluster) a brand-new
     * empty cluster for the given permissions.
     */
    protected synchronized Cluster createANewEmptyAndUsableCluster(Permissions perms)
            throws IOException, ClassNotFoundException {
        Cluster cluster = new MagicCluster(new ClusterID(env.keyGenerator.nextID()), perms,
                (MROWLock) env.transactionManager.newLock(), 256);

        activateCluster(cluster, 100);
        clusterCache.put(cluster.clusterID(), cluster);

        growingClusterIDs.addForKey(cluster.clusterID(), perms);

        return cluster;
    }

    /**
     * Returns a cluster that is guaranteed not to be locked by anyone; currently
     * implemented by always creating a fresh cluster.
     */
    protected Cluster giveMeAnUnlockedCluster(Permissions perms) throws IOException, ClassNotFoundException {
        return createANewEmptyAndUsableCluster(perms);
    }

    /**
     * Registers a container with a growing cluster and acquires the requested
     * lock level on that cluster for {@code locker}. If the growing cluster
     * cannot be locked, falls back to a guaranteed-unlocked cluster. On any
     * failure the lock acquired so far is released.
     */
    public void registerContainerAndLock(StorageObjectContainer container, Permissions perms,
            Transaction locker, int lockLevel) throws Exception {
        if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this, "registerContainer()", LogWriter.DEBUG3);
        }

        Cluster cluster = null;

        boolean locked = false;
        boolean alright = false;

        try {
            synchronized (this) {
                MagicTransaction ta = (MagicTransaction) env.transactionManager.currentTA();
                cluster = growingCluster(perms, ta);

                Lock clusterLock = cluster.lock();
                int prevLevel = clusterLock.tryAcquire(locker, lockLevel);

                if (prevLevel == Lock.NOT_ACQUIRED) {
                    cluster = giveMeAnUnlockedCluster(perms);

                    clusterLock = cluster.lock();
                    prevLevel = clusterLock.tryAcquire(locker, lockLevel);

                    if (prevLevel == Lock.NOT_ACQUIRED) {
                        // A brand-new cluster's lock must always be acquirable.
                        throw new Error("BUG! We could not acquire a lock for an unlocked cluster.");
                    }
                }
                locked = true;

                cluster.registerContainer(container);
            }
            cluster.updateLockLevel(locker);

            if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
                env.logWriter.newEntry(this, " cluster: " + cluster.clusterID(), LogWriter.DEBUG3);
            }
            alright = true;
        } finally {
            if (!alright) {
                if (locked) {
                    cluster.lock().release(locker);
                }
            }
        }
    }


    public void invalidateContainer(StorageObjectContainer container) {
        synchronized (container) {
            container.getCluster().removeContainer(container);
            container.setCluster(null);
        }
    }


    /**
     * Recovers one cluster after an unclean shutdown. Decides, from the
     * {@code .old}/{@code .new} leftovers and the set of uncommitted
     * transaction IDs, whether to roll the cluster back to its old state or
     * keep the committed file, then loads and activates it.
     */
    protected Cluster restoreCluster(final ClusterID cid, Set uncommittedTaIDs) throws Exception {
        String basename = basename(cid);
        Cluster cluster;

        // A stale lock file is meaningless after a crash.
        new File(basename + POSTFIX_LOCK).delete();

        File dir = new File(env.getDatabaseDir() + Env.DATA_DIR);
        File[] oldFileList = dir.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String name) {
                return name.startsWith(cid.value() + POSTFIX_SEPARATOR) && name.endsWith(POSTFIX_OLD);
            }
        });

        File[] newFileList = dir.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String name) {
                // FIX: this filter previously tested POSTFIX_OLD (copy-paste from
                // the filter above), so the ".new" leftovers this list is meant to
                // collect were never found during recovery.
                return name.startsWith(cid.value() + POSTFIX_SEPARATOR) && name.endsWith(POSTFIX_NEW);
            }
        });

        File clusterFile = new File(basename + POSTFIX_CLUSTER);

        if (oldFileList.length == 0) {
            if (newFileList.length == 1) {
                // Prepared but never renamed into place: the committed file is
                // still authoritative, discard the prepared state.
                newFileList[0].delete();
            }
        } else if (oldFileList.length == 1) {
            long num = Long.parseLong(splitClusterName(oldFileList[0].getName())[1]);
            TransactionID taID = new TransactionID(num);
            if (uncommittedTaIDs.contains(taID)) {
                // The owning transaction never committed: roll back to the old
                // state, dropping whichever newer file exists.
                if (newFileList.length == 1) {
                    newFileList[0].delete();
                } else {
                    clusterFile.delete();
                }
                if (!oldFileList[0].renameTo(clusterFile)) {
                    throw new IOException("Unable to rename old cluster file " + oldFileList[0]
                            + " to " + clusterFile);
                }

            } else {
                // The transaction committed; the leftover old state is garbage.
                if (oldFileList.length == 1) {
                    oldFileList[0].delete();
                }
            }
        }

        cluster = (Cluster) loadData(basename + POSTFIX_CLUSTER);
        activateCluster(cluster, 0);

        return cluster;
    }


    /**
     * Returns the cluster for {@code cid}, loading it from disk (and
     * activating, lock-restoring and caching it) on a cache miss. The on-disk
     * candidates are checked in priority order: the transaction's uncommitted
     * copy, the copy named in a persisted lock file, the committed file, and
     * finally the transaction's prepared {@code .new} file.
     *
     * @throws IOException if no cluster file can be found for {@code cid}
     */
    public Cluster loadCluster(ClusterID cid, MagicTransaction ta) throws IOException, ClassNotFoundException {
        Cluster cluster = (Cluster) clusterCache.get(cid);
        if (cluster == null) {
            if (env.logWriter.hasTarget(LogWriter.DEBUG)) {
                env.logWriter.newEntry(this, "loadCluster(): load cluster from disk: " + cid.toString(),
                        LogWriter.DEBUG);
            }

            final String basename = basename(cid);
            String newClusterName = ta == null ? null
                    : basename + POSTFIX_SEPARATOR + ta.taID().value() + POSTFIX_NEW;
            String uncommittedClusterName = ta == null ? null
                    : basename + POSTFIX_SEPARATOR + ta.taID().value() + POSTFIX_CLUSTER;
            String currentClusterName = basename + POSTFIX_CLUSTER;
            String lockName = basename + POSTFIX_LOCK;

            String clusterName = null;

            // FIX: guard the ta-dependent names against null -- previously
            // new File(null) threw a NullPointerException when ta == null.
            if (uncommittedClusterName != null && new File(uncommittedClusterName).exists()) {
                clusterName = uncommittedClusterName;
            } else if (new File(lockName).exists()) {
                MROWLock lock = (MROWLock) loadData(lockName);
                TransactionID lockerID = lock.getWriteLockingTransactionID();
                clusterName = basename + POSTFIX_SEPARATOR + lockerID.value() + POSTFIX_CLUSTER;
            } else if (new File(currentClusterName).exists()) {
                clusterName = currentClusterName;
            } else if (newClusterName != null && new File(newClusterName).exists()) {
                clusterName = newClusterName;
            }
            if (clusterName == null) {
                // FIX: fail with a meaningful exception instead of an NPE from
                // loadData(null).
                throw new IOException("No cluster file found for " + cid);
            }
            cluster = (Cluster) loadData(clusterName);

            int clusterByteSize = (int) new File(clusterName).length();
            if (compressClusters) {
                // File length is post-compression; scale up to estimate the
                // uncompressed size.
                clusterByteSize *= compressionFactor;
            }

            env.logWriter.newEntry(this, "loaded data = " + cluster.getClass().getName(), LogWriter.DEBUG);

            synchronized (this) {
                // Another thread may have loaded the same cluster while we were
                // reading it outside the monitor.
                Cluster interimCluster = (Cluster) clusterCache.get(cid);
                if (interimCluster != null) {
                    env.logWriter.newEntry(this,
                            "loadCluster(): cluster was loaded by another thread too; droping my copy",
                            LogWriter.DEBUG);

                    cluster = interimCluster;

                } else {
                    synchronized (cluster) {
                        // Restore a persisted lock if one exists, otherwise start
                        // with a fresh lock.
                        File lockFile = new File(lockName);
                        if (lockFile.exists()) {
                            cluster.setLock((Lock) loadData(lockName));
                            if (!lockFile.delete()) {
                                env.logWriter.newEntry(this, "could not delete lock file " + lockFile,
                                        LogWriter.ERROR);
                            }
                        } else {
                            if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
                                env.logWriter.newEntry(this,
                                        "no lock on disk for " + cid + ", creating a new lock.",
                                        LogWriter.DEBUG3);
                            }
                            cluster.setLock(env.transactionManager.newLock());
                        }
                        ((MROWLock) cluster.lock()).setDebugInfo("clusterID=" + cluster.clusterID());

                        activateCluster(cluster, clusterByteSize);
                    }

                    if (clusterByteSize > maxClusterSize * 2) {
                        splitCluster(cluster);
                    }

                    clusterCache.put(cluster.clusterID(), cluster);
                }
            }
        }
        if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this, "returning MagicCluster: " + cluster, LogWriter.DEBUG3);
        }
        return cluster;
    }


    // NOTE(review): intentionally a no-op in this revision (loadCluster still
    // calls it for oversized clusters); splitting is presumably unimplemented.
    public void splitCluster(Cluster cluster) {
    }


    /**
     * Removes the cluster from the cache; optionally deactivates it as well.
     */
    public void unloadCluster(ClusterID cid, boolean deactivate) throws IOException {
        if (env.logWriter.hasTarget(LogWriter.DEBUG)) {
            env.logWriter.newEntry(this, "unloadCluster(" + cid + "," + deactivate + ").", LogWriter.DEBUG);
        }
        Cluster cluster = (Cluster) clusterCache.remove(cid);

        if (deactivate) {
            deactivateCluster(cluster);
        }
    }


    /**
     * Wires a (newly created or freshly deserialized) cluster to this store's
     * environment and records its current byte size.
     */
    protected void activateCluster(Cluster cluster, int size) {
        if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this, "activateCluster(): " + cluster.clusterID(), LogWriter.DEBUG3);
        }
        cluster.setEnv(env);
        cluster.setClusterStore(this);
        cluster.touch();
        cluster.setCurrentSize(size);
    }


    /**
     * Deactivates a cluster that has been evicted from the cache.
     */
    protected void deactivateCluster(Cluster cluster) throws IOException {
        if (env.logWriter.hasTarget(LogWriter.DEBUG)) {
            env.logWriter.newEntry(this,
                    "deactivateCluster(): " + cluster.clusterID() + " priority: " + cluster.cachePriority(),
                    LogWriter.DEBUG);
            env.logWriter.newEntry(this, " lock: " + cluster.lock().level(null), LogWriter.DEBUG);
        }

        String basename = basename(cluster.clusterID());

        // NOTE(review): the synchronized block is empty and 'basename' is unused
        // here -- the persist-on-deactivate logic appears to have been removed or
        // lost; verify against version history before relying on deactivation.
        synchronized (this) { }
    }


    /**
     * First commit phase for one cluster: lets the cluster prepare, then (if
     * this transaction holds a write lock) moves the committed file aside to
     * {@code .old} and writes the new state to {@code .new}.
     */
    public synchronized void prepareCommitCluster(Transaction ta, ClusterID cid)
            throws IOException, ClassNotFoundException {
        if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this, "prepareCommitCluster(): " + cid, LogWriter.DEBUG3);
        }

        Cluster cluster = loadCluster(cid, (MagicTransaction) ta);
        cluster.prepareCommit(ta);

        String basename = basename(cid);
        File clusterFile = new File(basename + POSTFIX_CLUSTER);
        File oldFile = new File(basename + POSTFIX_SEPARATOR + ta.taID().value() + POSTFIX_OLD);

        if (cluster.lock().level(null) >= Lock.LEVEL_WRITE) {
            if (clusterFile.exists() && !clusterFile.renameTo(oldFile)) {
                throw new IOException("Unable to rename cluster file " + clusterFile + " to " + oldFile);
            }
            String tempFilename = basename(cid) + POSTFIX_SEPARATOR + ta.taID().value() + POSTFIX_NEW;
            storeDataImmediately(cluster, tempFilename);
        }
    }


    /**
     * Second commit phase: promotes {@code .new} to the committed file name,
     * deletes the {@code .old} backup, commits the in-memory cluster and
     * refreshes the lock file on disk.
     */
    public synchronized void commitCluster(Transaction ta, ClusterID cid)
            throws IOException, ClassNotFoundException {
        if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this, "commitCluster(): " + cid, LogWriter.DEBUG3);
        }

        String basename = basename(cid);
        File clusterFile = new File(basename + POSTFIX_CLUSTER);
        File oldFile = new File(basename + POSTFIX_SEPARATOR + ta.taID().value() + POSTFIX_OLD);
        File newFile = new File(basename + POSTFIX_SEPARATOR + ta.taID().value() + POSTFIX_NEW);
        File uncommittedFile = new File(basename + POSTFIX_SEPARATOR + ta.taID().value() + POSTFIX_CLUSTER);

        if (newFile.exists() && !newFile.renameTo(clusterFile)) {
            throw new IOException("Unable to rename cluster file " + newFile + " to " + clusterFile);
        }

        if (oldFile.exists() && !oldFile.delete()) {
            throw new IOException("Unable to delete old cluster file " + oldFile);
        }
        Cluster cluster = loadCluster(cid, (MagicTransaction) ta);
        cluster.commit(ta);

        if (uncommittedFile.exists() && !uncommittedFile.delete()) {
            throw new IOException("Unable to delete uncommitted cluster file " + uncommittedFile);
        }

        updateLockOnDisk(cluster, ta);
    }


    /**
     * Rolls one cluster back for an aborting transaction: discards {@code .new},
     * restores {@code .old} over the committed file, aborts the in-memory
     * cluster, evicts it from the cache and refreshes the lock file.
     */
    public synchronized void abortCluster(Transaction ta, ClusterID cid)
            throws IOException, ClassNotFoundException {
        String basename = basename(cid);
        File newFile = new File(basename + POSTFIX_SEPARATOR + ta.taID().value() + POSTFIX_NEW);
        File clusterFile = new File(basename + POSTFIX_CLUSTER);
        File oldFile = new File(basename + POSTFIX_SEPARATOR + ta.taID().value() + POSTFIX_OLD);
        if (newFile.exists() && !newFile.delete()) {
            throw new IOException("Unable to delete new cluster file " + newFile);
        }
        if (oldFile.exists()) {
            if (clusterFile.exists() && !clusterFile.delete()) {
                throw new IOException("Unable to delete cluster file " + clusterFile);
            }
            if (!oldFile.renameTo(clusterFile)) {
                throw new IOException("Unable to rename old cluster file " + oldFile + " to " + clusterFile);
            }
        }

        Cluster cluster = loadCluster(cid, (MagicTransaction) ta);
        cluster.abort(ta);

        // Evict without deactivating so the rolled-back state is reloaded from
        // disk on next access.
        unloadCluster(cid, false);

        updateLockOnDisk(cluster, ta);
    }


    /**
     * Persists the cluster's lock to {@code <cid>.lock}, or removes the lock
     * file when the transaction no longer holds any lock level.
     */
    protected void updateLockOnDisk(Cluster cluster, Transaction ta) throws IOException {
        ClusterID cid = cluster.clusterID();
        if (cluster.lock().level(ta) == Lock.LEVEL_NONE) {
            File lockFile = new File(basename(cid) + POSTFIX_LOCK);
            if (lockFile.exists() && !lockFile.delete()) {
                throw new IOException("Unable to delete lock file.");
            }
        } else {
            storeDataImmediately(cluster.lock(), basename(cid) + POSTFIX_LOCK);
        }
    }


    /**
     * Background writer: serializes queued objects to disk so callers of
     * {@link ClusterStore#storeData} do not block on I/O. The queue is bounded
     * (producers wait while it holds 10 or more entries).
     */
    private class StoreThread extends Thread {

        /** A (file name, object) pair queued for writing. */
        private class Entry {

            public String key;
            public Object value;

            public Entry(String key, Object value) {
                this.value = value;
                this.key = key;
            }
        }

        private LinkedList storeList = new LinkedList();
        private volatile boolean stopRunning;

        /** Asks the thread to stop and wakes it if it is waiting on the queue. */
        public void stopRunning() {
            stopRunning = true;
            synchronized (storeList) {
                storeList.notifyAll();
            }
        }

        /** Queues an object for asynchronous writing; blocks while the queue is full. */
        public void storeData(Object obj, String key) {
            if (stopRunning) {
                throw new RuntimeException("cannot call storeData() after stopRunning()");
            }

            synchronized (storeList) {
                while (storeList.size() >= 10) {
                    try {
                        storeList.wait();
                    } catch (InterruptedException ignore) {
                    }
                }
                storeList.addLast(new Entry(key, obj));
                storeList.notifyAll();
            }
        }

        public void run() {
            // FIX: the previous loop ('for (Entry entry = null; !stopRunning &&
            // entry == null; )') terminated as soon as one entry had been
            // processed, because 'entry' was never reset to null -- the writer
            // thread died after a single write and later queued entries were
            // silently dropped. Keep draining until stopRunning is set.
            while (!stopRunning) {
                Entry entry = null;
                synchronized (storeList) {
                    if (storeList.size() > 0) {
                        entry = (Entry) storeList.removeFirst();
                        storeList.notifyAll();
                    } else {
                        try {
                            storeList.wait();
                        } catch (InterruptedException ignore) {
                        }
                    }
                }
                if (entry != null) {
                    try {
                        storeDataImmediately(entry.value, entry.key);
                    } catch (IOException e) {
                        env.logWriter.newEntry(this, "could not write: " + entry.value + ", filename: "
                                + entry.key, e, LogWriter.ERROR);
                    }
                }
            }
        }
    }

    // NOTE(review): this thread is never start()ed anywhere in this class --
    // presumably started by a caller; verify, otherwise storeData() queues grow
    // until producers block.
    private StoreThread storeThread = new StoreThread();

    /**
     * Queues {@code obj} for asynchronous serialization to file {@code key}.
     */
    protected void storeData(Object obj, String key) throws IOException {
        if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this, "storeData(): " + key, LogWriter.DEBUG3);
        }

        storeThread.storeData(obj, key);
    }

    /**
     * Synchronously serializes {@code obj} to file {@code key}, GZIP-compressed
     * when configured, buffered otherwise.
     */
    protected void storeDataImmediately(Object obj, String key) throws IOException {
        OutputStream out = new FileOutputStream(key);

        if (compressClusters) {
            out = new GZIPOutputStream(out, 3 * 4096);
        } else {
            out = new BufferedOutputStream(out, 3 * 4096);
        }

        ObjectOutputStream oout = new ObjectOutputStream(out);
        try {
            oout.writeObject(obj);
        } finally {
            oout.close();
        }
    }

    /**
     * Deserializes one object from file {@code key}, mirroring the compression
     * choice of {@link #storeDataImmediately}.
     */
    protected Object loadData(String key) throws IOException, ClassNotFoundException {
        if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this, "loadData(): " + key, LogWriter.DEBUG3);
        }

        InputStream in = new FileInputStream(key);

        if (compressClusters) {
            in = new GZIPInputStream(in, 3 * 4096);
        } else {
            in = new BufferedInputStream(in, 3 * 4096);
        }

        ObjectInputStream oin = new ResolvingObjectInputStream(in);
        try {
            Object result = oin.readObject();
            return result;
        } finally {
            oin.close();
        }
    }


    /**
     * Aborts every cluster touched by the transaction: write-locked clusters
     * are rolled back via {@link #abortCluster}; read-locked ones merely
     * release their lock. Each cluster is handled at most once.
     */
    void abortTransaction(MagicTransaction ta) throws IOException, ClassNotFoundException {
        ta.commitClusterIDs = new DxHashSet(64);

        DxIterator it = ta.idTable.iterator();
        ClusterID cid;
        while ((cid = (ClusterID) it.next()) != null) {
            if (!ta.commitClusterIDs.contains(cid)) {

                Cluster cluster = loadCluster(cid, ta);

                if (cluster.lock().level(ta) > Lock.LEVEL_READ) {
                    if (env.logWriter.hasTarget(LogWriter.DEBUG2)) {
                        env.logWriter.newEntry(this, "abort cluster: " + cid, LogWriter.DEBUG2);
                    }

                    abortCluster(ta, cid);
                } else {
                    cluster.lock().release(ta);
                }
                ta.commitClusterIDs.add(cid);
            }
        }
    }
}