package org.netbeans.mdr.persistence.btreeimpl.btreestorage;

import java.io.*;
import java.util.*;

import org.netbeans.mdr.persistence.*;
import org.netbeans.mdr.util.Logger;

/**
 * A transactional page cache shared by all open btree databases in the VM.
 * <p>
 * Each {@code FileCache} instance manages a set of data files plus a
 * {@link LogFile} used for write-ahead logging; the page pool itself
 * ({@code pages}/{@code pageHash}/{@code freePages}) is static and therefore
 * shared across every instance. Pages are pinned while in use, flushed to disk
 * on {@link #commit}, and recycled through an LRU-ish free list. Public
 * page-level operations are {@code synchronized} on the instance; the global
 * open-file table has its own lock ({@code OPEN_FILES_CACHE}).
 */
public class FileCache {

    /** Names of the data files managed by this cache instance (index == fileIndex). */
    private String [] fileNames;

    /** Current size in bytes of each data file, parallel to {@code fileNames}. */
    private int fileSize[];

    /** Header read from file 0; all files must carry an equal header. */
    private FileHeader header;

    /** Write-ahead log protecting this instance's files. */
    private LogFile log;

    /** True while a transaction is open (set by the first setWritable, cleared by commit). */
    private boolean inXact;

    /** Page size in bytes, taken from BtreeDatabase.PAGE_SIZE (shared by all instances). */
    private static int pageSize;

    /** All CachedPage objects ever allocated, shared across instances. */
    private static ArrayList pages;

    /** HashKey(owner, PageID) -> CachedPage lookup for the shared pool. */
    private static HashMap pageHash;

    /** Unpinned pages available for reuse; reclaimed from the tail, freed to head or tail. */
    private static IntrusiveList freePages;

    /** All live FileCache instances; membership gates flushing in flushOne(). */
    private static HashSet instances = new HashSet();

    /** Pages that must not be reused until the log has been flushed (key -> page). */
    private HashMap heldForLog;

    /** Timestamp stamped into the file headers by the transaction in progress. */
    private long newTimeStamp;

    // Shared cache statistics, reported by showStats().
    private static int hits = 0;
    private static int misses = 0;
    private static int extensions = 0;
    private static int pagesFlushed = 0;

    /** How many times this instance's log was flushed just to free pages. */
    private int logFlushes = 0;

    // Countdown state for the forced-failure test hooks (see checkForForcedFailure);
    // -1 means "not yet initialized from the system property".
    private static int flushFailure = -1;
    private static int commitFailure = -1;

    /** Listeners to be informed before a commit writes anything; lazily created. */
    private ArrayList toNotify;

    /** Maximum number of simultaneously open RandomAccessFiles kept in OPEN_FILES_CACHE. */
    private static final int MAX_FILES = 200;

    /**
     * Access-ordered LRU map of fileName -> open RandomAccessFile, shared by the
     * whole VM. When it grows past MAX_FILES the eldest (least recently used)
     * file is closed and evicted. Close failures are only logged — eviction
     * proceeds regardless.
     */
    private static final Map OPEN_FILES_CACHE = new LinkedHashMap(MAX_FILES * 2, .5f, true) {
        protected boolean removeEldestEntry(Map.Entry eldest) {
            if (size() > MAX_FILES) {
                RandomAccessFile file = (RandomAccessFile) eldest.getValue();
                try {
                    file.close();
                } catch (IOException e) {
                    Logger.getDefault().notify(e);
                }
                return true;
            }
            return false;
        }
    };

    /**
     * Removes the named file from the shared open-file table and closes it.
     * A no-op if the file is not currently cached.
     *
     * @param fileName name of the file to close
     * @throws IOException if closing the underlying file fails
     */
    static void closeFile(String fileName) throws IOException {
        RandomAccessFile file;
        synchronized (FileCache.OPEN_FILES_CACHE) {
            file = (RandomAccessFile) FileCache.OPEN_FILES_CACHE.remove(fileName);
        }
        // Close outside the lock so a slow close does not block other lookups.
        if (file != null) file.close();
    }

    /**
     * Returns an open read-write RandomAccessFile for the given name, opening
     * and caching it on first use. The returned handle is shared — callers must
     * not close it themselves (the LRU eviction in OPEN_FILES_CACHE does).
     *
     * @param fileName name of the file to open
     * @return an open "rw" RandomAccessFile for the file
     * @throws IOException if the file cannot be opened
     */
    static RandomAccessFile getFile(String fileName) throws IOException {
        RandomAccessFile result;
        synchronized (FileCache.OPEN_FILES_CACHE) {
            result = (RandomAccessFile) FileCache.OPEN_FILES_CACHE.get(fileName);
            if (result == null) {
                result = new RandomAccessFile(fileName, "rw");
                FileCache.OPEN_FILES_CACHE.put(fileName, result);
            }
        }
        return result;
    }

    // One-time setup of the shared page pool, sized from BtreeDatabase constants.
    static {
        pages = new ArrayList(BtreeDatabase.FILE_CACHE_SIZE);
        pageHash = new HashMap();
        freePages = new IntrusiveList();
        pageSize = BtreeDatabase.PAGE_SIZE;
        addPages(BtreeDatabase.FILE_CACHE_SIZE);
    }

    /**
     * Test hook: supports deliberately killing the VM after a configured number
     * of operations, to exercise crash recovery. On the first call (count == -1)
     * the countdown is read from the named system property; each later call
     * decrements it, and when it hits zero the process exits abruptly via
     * System.exit(1) (simulating a crash — no orderly shutdown).
     *
     * @param property system property holding the initial countdown
     * @param count    current countdown, or -1 if not yet initialized
     * @return the decremented countdown to store back
     */
    static int checkForForcedFailure(String property, int count) {
        if (count == -1) {
            Integer failCount = Integer.getInteger(property);
            count = (failCount == null) ? 0 : failCount.intValue();
        }

        if (count > 0 && --count == 0) {
            System.exit(1);
        }

        return count;
    }

    /**
     * Grows the shared pool by allocating {@code numToAdd} fresh pages and
     * putting them on the free list.
     *
     * @param numToAdd number of pages to allocate
     */
    private static void addPages(int numToAdd) {
        for (int i = 0; i < numToAdd; i++) {
            CachedPage page = new CachedPage(pageSize);
            pages.add(page);
            freePages.addLast(page);
        }
    }

    /**
     * Marks a page as unreusable until the log has been flushed (write-ahead
     * rule: the logged before-image must reach disk before the page can be
     * recycled or rewritten).
     *
     * @param page the page to hold
     */
    void holdForLog(CachedPage page) {
        page.heldForLog = true;
        heldForLog.put(page.key, page);
    }

    /**
     * Called after the log has been flushed: releases all held pages, returning
     * any that are also unpinned to the front of the free list.
     */
    void logWasFlushed() {
        Iterator itr = heldForLog.values().iterator();
        while (itr.hasNext()) {
            CachedPage page = (CachedPage)itr.next();
            page.heldForLog = false;
            if (page.getPinCount() == 0)
                freePages.addFirst(page);
        }
        heldForLog.clear();
    }

    /**
     * Opens a cache over the given set of data files plus a log file derived
     * from {@code baseName}. All files must carry equal FileHeaders. On any
     * failure the partially opened files and log are closed before the
     * exception propagates.
     *
     * @param fileNames names of the data files to manage
     * @param baseName  base name used to create/locate the log file
     * @throws StorageException if the files are inconsistent or any I/O fails
     */
    public FileCache(String fileNames[], String baseName)
        throws StorageException {

        this.fileNames = new String [fileNames.length];
        boolean failure = true;
        try {
            try {
                RandomAccessFile[] files = new RandomAccessFile[fileNames.length];
                fileSize = new int[fileNames.length];

                FileHeader tmpHeader;

                for (int i = 0; i < fileNames.length; i++) {
                    files[i] = getFile(fileNames[i]);
                    this.fileNames[i] = fileNames[i];
                }
                // Header of file 0 supplies the fileId the log needs;
                // the LogFile constructor may also run crash recovery here
                // before the headers are re-read below — TODO confirm.
                tmpHeader = new FileHeader(files[0]);

                log = new LogFile(
                    this, baseName, BtreeDatabase.PAGE_SIZE, fileNames.length, tmpHeader.fileId);

                for (int i = 0; i < fileNames.length; i++) {
                    // NOTE: file sizes are tracked as int — files over 2GB would overflow.
                    fileSize[i] = (int)files[i].length();
                    FileHeader hdr = new FileHeader(files[i]);

                    if (i == 0) {
                        header = hdr;
                    }
                    else if (!hdr.equals(header)) {
                        throw new StoragePersistentDataException(
                            "Files are not consistent");
                    }
                }

                heldForLog = new HashMap();
                failure = false;
                instances.add(this);
            }
            finally {
                // Best-effort cleanup: only files actually recorded (non-null
                // entries) are closed, and the log only if it got created.
                if (failure) {
                    for (int i = 0; i < this.fileNames.length; i++) {
                        if (this.fileNames[i] != null) {
                            closeFile(this.fileNames[i]);
                        }
                    }
                    if (log != null)
                        log.close();
                }
            }
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }

    }

    /**
     * Returns open handles for this cache's data files.
     *
     * @return RandomAccessFiles parallel to this instance's fileNames
     * @throws IOException if any file cannot be opened
     */
    RandomAccessFile[] getFiles() throws IOException {
        return getFiles(fileNames);
    }

    /**
     * Returns open handles (via the shared open-file table) for the named files.
     *
     * @param fileNames files to open
     * @return RandomAccessFiles parallel to {@code fileNames}
     * @throws IOException if any file cannot be opened
     */
    static RandomAccessFile[] getFiles(String [] fileNames) throws IOException {
        RandomAccessFile[] files = new RandomAccessFile[fileNames.length];
        for (int i = 0; i < fileNames.length; i++) {
            files[i] = FileCache.getFile(fileNames[i]);
        }
        return files;
    }

    /**
     * Abandons the current transaction: simply closes the files without
     * committing. Recovery from the log presumably restores the previous
     * state on next open — TODO confirm against LogFile.
     *
     * @throws StorageException if closing fails
     */
    public synchronized void abort() throws StorageException {
        closeFiles();
    }

    /**
     * Commits any open transaction, releases this instance's pages back to the
     * shared pool (shrinking the pool if it grew past its target size), purges
     * this instance's entries from the shared page hash, and closes the files.
     *
     * @throws StorageException if the commit or close fails
     */
    public synchronized void close() throws StorageException {
        commit();

        Iterator itr = pages.iterator();
        while (itr.hasNext()) {
            CachedPage page = (CachedPage)itr.next();
            if (page.getOwner() == this) {
                if (pages.size() > BtreeDatabase.FILE_CACHE_SIZE) {
                    // Pool previously grew beyond its target — let this page go.
                    itr.remove();
                    freePages.remove(page);
                } else {
                    freePages.addLast(page);
                    page.reInit(null, null);
                }
            }
        }
        for (Iterator it = pageHash.keySet().iterator(); it.hasNext();) {
            HashKey entry = (HashKey) it.next();
            if (entry.owner == this) {
                it.remove();
            }
        }

        closeFiles();
    }

    /**
     * Closes this instance's data files and log, and always (even on failure)
     * removes this instance from the live-instances set so flushOne() will no
     * longer write on its behalf.
     *
     * @throws StorageException if any close fails
     */
    private void closeFiles() throws StorageException {
        try {
            for (int i = 0; i < fileNames.length; i++)
                closeFile(fileNames[i]);
            log.close();
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        } finally {
            instances.remove(this);
        }
    }

    /**
     * Commits the current transaction, if any: notifies registered listeners,
     * stamps the new timestamp into every file's header page, flushes the log
     * (write-ahead), writes all of this instance's dirty pages, then marks the
     * log committed. Also drives the commit-failure test hook.
     *
     * @throws StorageException if any step of the commit fails
     */
    public synchronized void commit() throws StorageException {
        commitFailure = checkForForcedFailure(
            "org.netbeans.mdr.persistence.btreeimpl.btreestorage.FileCache.commitFailure",
            commitFailure);

        if (toNotify != null) {
            Iterator itr = toNotify.iterator();
            while (itr.hasNext()) {
                NotifyOnCommit obj = (NotifyOnCommit)itr.next();
                obj.prepareToCommit();
            }
        }

        if (inXact) {

            // Stamp every file's header page (page 0) with the transaction's
            // timestamp so the files remain mutually consistent.
            for (int i = 0; i < fileNames.length; i++) {
                CachedPage first = getPage(i, 0);
                setWritable(first);
                FileHeader.updateTime(first, newTimeStamp);
                first.unpin();
            }
            // Write-ahead: log must be on disk before data pages are written.
            log.flush();

            Iterator itr = pages.iterator();
            while (itr.hasNext()) {
                CachedPage page = (CachedPage)itr.next();
                if (page.isDirty && page.getOwner() == this)
                    flushOne(page);
            }
            log.commit();
            header.timeStamp = newTimeStamp;
            inXact = false;
        }
    }

    /**
     * Writes one dirty page back to its owner's data file and clears its dirty
     * flag. Pages whose owner has already been closed (no longer in
     * {@code instances}) are silently skipped. Also drives the flush-failure
     * test hook, and extends the owner's recorded file size when the page lies
     * at/past the current end of file.
     *
     * @param page the dirty page to write
     * @throws StorageException if the write fails
     */
    private static void flushOne(CachedPage page) throws StorageException{
        try {
            flushFailure = checkForForcedFailure(
                "org.netbeans.mdr.persistence.btreeimpl.btreestorage.FileCache.flushFailure",
                flushFailure);
            FileCache owner = page.getOwner();
            assert owner != null;

            if (!instances.contains(owner)) return;
            RandomAccessFile file = getFile(owner.fileNames[page.key.fileIndex]);
            file.seek(page.key.offset);
            file.write(page.contents);
            page.isDirty = false;
            pagesFlushed++;
            if (page.key.offset >= owner.fileSize[page.key.fileIndex]) {
                owner.fileSize[page.key.fileIndex] = page.key.offset + pageSize;
            }
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }
    }

    /**
     * Unpins each page in the array.
     *
     * @param pages pages to unpin
     * @throws StorageException if any page is not currently pinned
     */
    public synchronized void unpin(CachedPage pages[])
        throws StorageException {
        for (int i = 0; i < pages.length; i++)
            unpin(pages[i]);
    }

    /**
     * Releases one pin on a page. When the last pin is released and the page is
     * not held for the log, the page becomes eligible for reuse (added to the
     * front of the free list, so recently used pages are reclaimed last from
     * the tail).
     *
     * @param page the page to unpin
     * @throws StorageException if the page is not currently pinned
     */
    public synchronized void unpin(CachedPage page)
        throws StorageException {
        if (page.getPinCount() <= 0) {
            throw new StorageTransientDataException(
                "Attempt to unpin page which is not pinned");
        }

        // innerUnpin() appears to return the new pin count — TODO confirm in CachedPage.
        if ((page.innerUnpin() == 0) && !page.heldForLog) {
            freePages.addFirst(page);
        }

    }

    /**
     * Fetches {@code size} consecutive pages from a file, each pinned.
     *
     * @param fileidx index of the file within this cache
     * @param first   number of the first page to fetch
     * @param size    how many consecutive pages to fetch
     * @return the pinned pages, in order
     * @throws StorageException if any page cannot be read
     */
    public synchronized CachedPage[] getPages(int fileidx, int first, int size)
        throws StorageException {

        CachedPage retval[] = new CachedPage[size];
        for (int i = 0 ; i < size; i++) {
            retval[i] =
                getPage(this, new PageID(fileidx, pageSize * (first + i)), false);
        }

        return retval;
    }

    /**
     * Fetches a single page by file index and page number, pinned.
     *
     * @param fileidx index of the file within this cache
     * @param pageNum page number (converted to a byte offset via pageSize)
     * @return the pinned page
     * @throws StorageException if the page cannot be read
     */
    public synchronized CachedPage getPage(int fileidx, int pageNum)
        throws StorageException {
        return getPage(this, new PageID(fileidx, pageNum * pageSize), false);
    }

    /**
     * Fetches a single page by PageID, pinned.
     *
     * @param page identifies the file and byte offset of the page
     * @return the pinned page
     * @throws StorageException if the page cannot be read
     */
    synchronized CachedPage getPage(PageID page) throws StorageException {
        return getPage(this, page, false);
    }

    /**
     * Key for the shared page hash: a (cache instance, page id) pair. Instances
     * are compared by identity; PageIDs by equals(). Immutable.
     */
    private static class HashKey {
        public final FileCache owner;
        public final PageID id;

        public HashKey(FileCache owner, PageID id) {
            this.owner = owner;
            this.id = id;
        }

        public boolean equals(Object o) {
            return o == this || ((o instanceof HashKey) && (((HashKey) o).owner == owner) && (((HashKey) o).id.equals(id)));
        }

        public int hashCode() {
            return owner.hashCode() + 31 * id.hashCode();
        }
    }

    /**
     * Core page lookup. On a cache hit the page is pinned and returned. On a
     * miss (unless {@code fromCacheOnly}) a free page is obtained — first from
     * the free list, then by flushing instance logs to release held pages, and
     * finally by growing the pool by half — the evicted page is flushed if
     * dirty and unhashed, and the frame is re-read (or zero-filled when the
     * offset lies beyond the file's current size, i.e. a brand-new page).
     *
     * @param instance      the cache instance the page belongs to
     * @param id            file index and byte offset of the page
     * @param fromCacheOnly if true, return null on a miss instead of reading
     * @return the pinned page, or null (miss with fromCacheOnly)
     * @throws StorageException if reading or flushing fails
     */
    private static CachedPage getPage(FileCache instance, PageID id, boolean fromCacheOnly)
        throws StorageException {
        HashKey key = new HashKey(instance, id);
        CachedPage page = (CachedPage)pageHash.get(key);
        if (page != null)
        {
            // pin() appears to return the pre-pin count — a previously unpinned
            // page must leave the free list. TODO confirm in CachedPage.
            if (page.pin(instance) == 0 && !page.heldForLog)
                freePages.remove(page);
            hits++;
            return page;
        }
        else if (fromCacheOnly) {
            return null;
        }

        CachedPage free = (CachedPage)freePages.removeLast();
        if (free == null) {

            // No free pages: flush every instance's log so pages held for the
            // log (write-ahead rule) become reusable.
            for (Iterator it = instances.iterator(); it.hasNext();) {
                FileCache cache = (FileCache) it.next();
                if (!cache.heldForLog.isEmpty())
                {
                    cache.log.flush();
                    cache.logFlushes++;
                }
            }
            free = (CachedPage)freePages.removeLast();
        }

        if (free == null) {
            // Still nothing free (everything pinned): grow the pool by ~50%.
            int increment = (pages.size() + 1) / 2;
            addPages(increment);
            extensions++;
            free = (CachedPage)freePages.removeLast();
        }

        if (free.isDirty) {
            flushOne(free);
        }

        // Drop the evicted frame's old hash entry before rebinding it.
        if (free.key != null && free.getOwner() != null) {
            pageHash.remove(new HashKey(free.getOwner(), free.key));
        }

        free.reInit(instance, id);
        pageHash.put(key, free);
        if (id.offset >= instance.fileSize[id.fileIndex]) {
            // Page beyond current end of file: brand new, start zeroed.
            Arrays.fill(free.contents, (byte)0);
        }
        else {
            try {
                RandomAccessFile file = getFile(instance.fileNames[id.fileIndex]);
                file.seek(id.offset);
                file.readFully(free.contents);
            }
            catch (IOException ex) {
                throw new StorageIOException(ex);
            }
        }

        free.pin(instance);
        misses++;
        return free;
    }

    /**
     * Makes a page writable: logs its before-image (write-ahead) and marks it
     * dirty. The first writable page of a transaction opens the transaction,
     * recording the new timestamp and writing the log's begin record. A no-op
     * for pages already dirty.
     *
     * @param page the page about to be modified
     * @throws StorageException if logging fails
     */
    public synchronized void setWritable(CachedPage page)
        throws StorageException{
        if (page.isDirty)
            return;

        if (!inXact) {
            newTimeStamp = System.currentTimeMillis();
            log.begin(fileNames, header.timeStamp, newTimeStamp);
            inXact = true;
        }
        log.addPageToLog(page);
        page.isDirty = true;
    }

    /**
     * Makes each page in the array writable.
     *
     * @param pages the pages about to be modified
     * @throws StorageException if logging fails
     */
    public synchronized void setWritable(CachedPage pages[])
        throws StorageException{
        for (int i = 0; i < pages.length; i++)
            setWritable(pages[i]);
    }

    /**
     * Debugging aid: dumps this instance's file list and the entire shared
     * page pool to the given stream.
     *
     * @param strm where to write the dump
     */
    public void dumpCache(PrintStream strm) {
        strm.println("Cached files:");
        for (int i = 0; i < fileNames.length; i++) {
            strm.println(
                Integer.toString(i) + ": " + fileNames[i] +
                " size: " + fileSize[i]);
        }
        strm.println("");


        strm.println(Integer.toString(pages.size()) + " pages");
        Iterator itr = pages.iterator();
        int num = 0;
        while (itr.hasNext()) {
            strm.println(Integer.toString(num++) + ":");
            strm.print(((CachedPage)itr.next()).toString());
        }
    }

    /**
     * Prints cache statistics to a byte stream (delegates to the Writer variant).
     *
     * @param strm where to write the statistics
     */
    public void showStats(PrintStream strm) {
        showStats(new PrintWriter(strm));
    }

    /**
     * Prints cache statistics: page counts (pinned/dirty/held), hit rate,
     * pages written, log flushes forced by page shortage, and pool growth.
     *
     * @param strm where to write the statistics
     */
    public void showStats(PrintWriter strm) {
        int pinned = 0;
        int dirty = 0;
        int held = 0;
        for (int i = 0; i < pages.size(); i++) {
            CachedPage pg = (CachedPage)pages.get(i);
            if (pg.getPinCount() > 0) {
                pinned++;
            }
            if (pg.isDirty)
                dirty++;
            if (pg.heldForLog)
                held++;
        }
        strm.println("Page counts: total = " + pages.size() + " pinned = " +
            pinned + " dirty = " + dirty + " held = " + held);
        strm.println(
            "Cache hits: " + hits + " misses: " + misses +
            " hit rate: " + 100. * (float)hits / (float)(hits + misses));
        strm.println(pagesFlushed + " pages written");
        strm.println("Log file flushed to free pages " + logFlushes + " times");
        strm.println("Cache made bigger " + extensions + " times");
        strm.flush();
    }

    /**
     * @return the page size in bytes used by the cache
     */
    int getPageSize() {
        return pageSize;
    }

    /**
     * Registers a listener to be informed (via prepareToCommit) at the start
     * of every commit, before anything is written.
     *
     * @param notified the listener to add
     */
    public synchronized void addNotifier(NotifyOnCommit notified) {
        if (toNotify == null) {
            toNotify = new ArrayList();
        }
        toNotify.add(notified);
    }

    /**
     * Callback interface for objects that need to do work at the start of a
     * commit (e.g. push pending state into cache pages before they are flushed).
     */
    public interface NotifyOnCommit {

        /**
         * Invoked at the beginning of every commit, before the transaction's
         * pages are written.
         *
         * @throws StorageException if preparation fails, aborting the commit
         */
        void prepareToCommit() throws StorageException;
    }

}