alvinalexander.com | career | drupal | java | mac | mysql | perl | scala | uml | unix  

What this is

This file is included in the DevDaily.com "Java Source Code Warehouse" project. The intent of this project is to help you "Learn Java by Example" TM.

Other links

The source code

/*
 *                 Sun Public License Notice
 * 
 * The contents of this file are subject to the Sun Public License
 * Version 1.0 (the "License"). You may not use this file except in
 * compliance with the License. A copy of the License is available at
 * http://www.sun.com/
 * 
 * The Original Code is NetBeans. The Initial Developer of the Original
 * Code is Sun Microsystems, Inc. Portions Copyright 1997-2001 Sun
 * Microsystems, Inc. All Rights Reserved.
 */
package org.netbeans.mdr.persistence.btreeimpl.btreestorage;

import java.io.*;
import java.text.*;
import java.util.*;

import org.netbeans.mdr.persistence.*;

/** The transaction log file used by the FileCache.
 * <p>
 * The scheme used is before-image logging.  That is, before a page is
 * modified, its contents are written to the log file.  The log file itself
 * contains two sorts of pages: these before-image pages, and map pages,
 * which keep track of the PageID of each before-image file.
 * <p>
 * At the successful completion of each transaction, the log file is deleted.
 * If, on cache creation, the log file exists, this indicates that a previous
 * transaction did not complete successfully.  Recovery is attempted, using
 * the contents of the log file to back out the partially completed
 * transaction.
 */
class LogFile {

    /* page size for the cache */
    private int pageSize;

    /* number of files in the cache */
    private int numFiles;

    /* name of the log file */
    private String fileName;

    /* the log file; null until the first page is logged (created lazily by
       createPhysicalLog) and reset to null on begin/commit */
    private RandomAccessFile file;

    /* bitmaps showing which pages are currently logged, one per cached file,
       indexed by page number (offset / pageSize) */
    private BitSet pageBitmaps[];

    /* the current map page being filled with before-image entries */
    private MapPage currentMap;

    /* owning cache */
    private FileCache cache;

    /* file ID */
    long fileId;

    /* countdown counters for forced failure during regression tests;
       -1 means "never fail" (see FileCache.checkForForcedFailure) */
    private int beforeWriteFailure = -1;
    private int afterCommitFailure = -1;
    private int recoveryFailure = -1;

    /** Create the log file.  If the file already exists on disk,
     * attempt recovery.  Note that exceptions only occur during recovery.
     * @param fCache the cache which owns this log file
     * @param name the name of the log file
     * @param pgSz the page size
     * @param files the number of files being logged
     * @param id the fileId for the files being logged
     * @exception StorageException I/O error during recovery
     * @exception BadParameterException the log file is not consistent with
     * the files being recovered
     * @exception ConsistencyException the log file is corrupt on disk
     */
    LogFile(FileCache fCache, String name, int pgSz, int files, long id)
            throws StorageException {

        // Size must be a power of 2 and >= 4096
        // no more than 4096 files
        cache = fCache;
        pageSize = pgSz;
        fileName = name;
        numFiles = files;
        fileId = id;
        pageBitmaps = new BitSet[files];
        for (int i = 0; i < files; i++)
            pageBitmaps[i] = new BitSet();

        // A leftover log file means the previous transaction did not commit;
        // back it out before the cache starts using the files.
        if (new File(name).exists())
            recover();
    }

    /* returns true if the page's before-image is already in the log for the
       current transaction (so it must not be logged twice) */
    private boolean isPageLogged(CachedPage page) {
        return pageBitmaps[page.key.fileIndex].get(page.key.offset/pageSize);
    }

    /* create the log file on disk (lazily, on the first logged page),
       truncating any stale contents and writing the initial map page */
    private void createPhysicalLog() throws StorageException {
        try {
            file = new RandomAccessFile(fileName, "rw");
            file.setLength(0);
            writeMap();
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }
    }

    /* add a new page to the log file
     * @param page the page to add
     * @exception StorageException error logging the page to the before file
     */
    void addPageToLog(CachedPage page) throws StorageException {
        // Pages beyond the file's EOF at transaction start have no
        // before-image to preserve: recovery truncates the file instead.
        if (page.key.offset >= currentMap.getEOF(page.key.fileIndex)) {
            return;
        }
        // Only the first modification in a transaction needs a before-image.
        if (isPageLogged(page))
            return;
        if (file == null)
            createPhysicalLog();
        try {
            file.seek(currentMap.nextPageOffset());
            file.write(page.contents);
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }

        // regression-test hook: optionally force a failure here
        beforeWriteFailure = cache.checkForForcedFailure(
            "org.netbeans.mdr.persistence.btreeimpl.btreestorage.LogFile.beforeWriteFailure",
            beforeWriteFailure);

        pageBitmaps[page.key.fileIndex].set(page.key.offset/pageSize);

        /* add to map */
        currentMap.add(page);
        if (currentMap.isFull()) {
            // map is full; need to write it
            writeMap();
        }

        /* prevent from being written until log is flushed */
        cache.holdForLog(page);
    }

    /* write the current map page to the log; syncs before and after so the
       map never points at before-images that are not yet durable */
    private void writeMap() throws StorageException{
        flushFile();
        currentMap.write(file);
        flushFile();
        // a full map stays on disk; chain a fresh one for further entries
        if (currentMap.isFull())
            currentMap = new MapPage(currentMap);
    }

    /* sync the file to disk */
    private void flushFile() throws StorageException{
        try {
            file.getFD().sync();
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }
    }

    /** make the log consistent on disk
     * @exception StorageException I/O error writing the log
     */
    void flush() throws StorageException{
        writeMap();
        // tell the cache it may now write pages held by holdForLog
        cache.logWasFlushed();
    }

    /** begin a new transaction
     * @param files the files being logged
     * @param timeStamp the timestamp for the previously committed transaction
     * @param newTimeStamp the timestamp for the new transaction
     * @exception StorageException I/O error accessing the files
     */
    void begin(RandomAccessFile files[], long timeStamp, long newTimeStamp)
            throws StorageException {
        file = null;
        currentMap = new MapPage(pageSize, numFiles, 0);
        // record each file's current EOF so addPageToLog can skip pages
        // appended during this transaction
        currentMap.setEOFs(files);
        for (int i = 0; i < numFiles; i++) {
            // xor with itself clears the bitmap without reallocating
            pageBitmaps[i].xor(pageBitmaps[i]);
        }
        currentMap.setTimeStamps(timeStamp, newTimeStamp);
        currentMap.setFileID(fileId);
    }

    /** commit the current transaction
     * @exception StorageException I/O error closing or deleting the log
     */
    void commit() throws StorageException{
        try {
            if (file != null) {
                file.close();
                // deleting the log marks the transaction as committed
                (new File(fileName)).delete();
            }
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }
        file = null;

        // regression-test hook: optionally force a failure here
        afterCommitFailure = cache.checkForForcedFailure(
            "org.netbeans.mdr.persistence.btreeimpl.btreestorage.LogFile.afterCommitFailure",
            afterCommitFailure);
    }

    /** close the log file
     * @exception StorageException I/O error closing the log
     */
    void close() throws StorageException{
        try {
            if (file != null)
                file.close();
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }
    }

    /** return the current log file size */
    int fileSize() {
        return currentMap.nextPageOffset();
    }

    /** recover from an incomplete transaction
     * @exception StorageException I/O error during recovery
     * @exception BadParameterException the log file is not consistent with
     * the files being recovered
     * @exception ConsistencyException the log file is corrupt on disk
     */
    void recover() throws StorageException {
        RandomAccessFile files[] = null;
        RandomAccessFile logFile = null;
        try {
            try {
                logFile = new RandomAccessFile(fileName, "r");
                files = cache.getFiles();
                if (files.length != numFiles) {
                    // NOTE(review): the message arguments appear transposed —
                    // numFiles is the requested count and files.length is what
                    // the cache holds; confirm before changing the text.
                    throw new StorageBadRequestException(
                        MessageFormat.format(
                            "Log file contains {0} files; {1} were requested",
                            new Object[] {
                                new Integer(numFiles), new Integer(files.length)}));
                }
                int offset = 0;
                int numPages = (int)logFile.length() / pageSize;
                if (numPages > 0) {
                    byte pageBuffer[] = new byte[pageSize];
                    // the first map page is at offset 0
                    MapPage page = new MapPage(logFile, 0, pageSize);
                    page.checkParameters(pageSize, numFiles);
                    for (int i = 0; i < numFiles; i++) {
                        page.checkFileHeader(files[i]);
                    }
                    // walk the chain of map pages, restoring each map's
                    // before-images into the cached files
                    while (true) {
                        page.recover(files, logFile, numPages, pageBuffer);
                        // regression-test hook: optionally force a failure here
                        recoveryFailure = cache.checkForForcedFailure(
                            "org.netbeans.mdr.persistence.btreeimpl.btreestorage.LogFile.recoveryFailure",
                            recoveryFailure);
                        MapPage newPage = page.getNext(logFile, numPages);
                        if (newPage != null && !newPage.isEmpty()) {
                            page = newPage;
                        } else {
                            break;
                        }
                    }
                    // restore each file's length to its pre-transaction EOF
                    page.truncateFiles(files);
                    logFile.close();
                    // deleting the log marks recovery as complete
                    (new File(fileName)).delete();
                }
            } finally {
                // harmless double-close when the success path already closed it
                if (logFile != null) {
                    logFile.close();
                }
            }
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }
    }
}

... this post is sponsored by my books ...

#1 New Release!

FP Best Seller

 

new blog posts

 

Copyright 1998-2021 Alvin Alexander, alvinalexander.com
All Rights Reserved.

A percentage of advertising revenue from
pages under the /java/jwarehouse URI on this website is
paid back to open source projects.