
What this is

This file is included in the DevDaily.com "Java Source Code Warehouse" project. The intent of this project is to help you "Learn Java by Example"™.

The source code

/*
 *                 Sun Public License Notice
 * 
 * The contents of this file are subject to the Sun Public License
 * Version 1.0 (the "License"). You may not use this file except in
 * compliance with the License. A copy of the License is available at
 * http://www.sun.com/
 * 
 * The Original Code is NetBeans. The Initial Developer of the Original
 * Code is Sun Microsystems, Inc. Portions Copyright 1997-2001 Sun
 * Microsystems, Inc. All Rights Reserved.
 */
package org.netbeans.mdr.persistence.btreeimpl.btreestorage;

import java.io.*;
import java.util.*;

import org.netbeans.mdr.persistence.*;

/**
* FileCache provides transactional cached access to a set of files.
* Changes to these files are accumulated both in memory and on disk until
* a commit is requested, at which time they are flushed to disk.  If the
* program exits for any reason without committing, any changes which were
* written to disk are rolled back to the last commit point the next time
* the FileCache is opened.
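*
* <p>A minimal usage sketch (file names here are hypothetical; the files
* must already exist with valid, identical headers):
* <pre>
*   FileCache cache = new FileCache(
*       new String[] { "data.btd", "data.btx" }, "data.btb");
*   CachedPage page = cache.getPage(0, 0);  // pins the page
*   cache.setWritable(page);                // logs it; begins a transaction
*   // ... modify page.contents ...
*   cache.unpin(page);
*   cache.commit();                         // flush the log, then the pages
* </pre>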
*/
public class FileCache {

    /* names of files contained in the cache */
    private String fileNames[];

    /* files contained in the cache */
    private RandomAccessFile files[];

    /* sizes of files contained in the cache.  */
    private int fileSize[];

    /* correct header for files in the cache */
    private FileHeader header;

    /* our log file */
    private LogFile log;

    /* true if cache has uncommitted changes */
    private boolean inXact;

    /* size of cached pages */
    private static int pageSize;

    /* all pages */
    private static ArrayList pages;

    /* pages hashed by their ID */
    private static HashMap pageHash;

    /* pages not currently pinned */
    private static IntrusiveList freePages;
    
    /* all open FileCache instances */
    private static HashSet instances = new HashSet();
    

    /* dirty pages which cannot be written until the log is flushed */
    private HashMap heldForLog;

    /* time stamp for current transaction */
    private long newTimeStamp;

    /* caching stats */
    private static int hits = 0;
    private static int misses = 0;
    private static int extensions = 0;
    private static int pagesFlushed = 0;

    private int logFlushes = 0;

    /* for regression testing */
    private static int flushFailure = -1;  /* fail after this many flushes */
    private static int commitFailure = -1; /* fail after this many commits */

    /* A list of objects to notify before committing */
    private ArrayList toNotify;
    
    static {
        pages = new ArrayList(BtreeDatabase.FILE_CACHE_SIZE);
        pageHash = new HashMap();
        freePages = new IntrusiveList();
        pageSize = BtreeDatabase.PAGE_SIZE;
        addPages(BtreeDatabase.FILE_CACHE_SIZE);
    }

    static int checkForForcedFailure(String property, int count) {
        if (count == -1) {
            /* see if we're supposed to fail after N operations */
            Integer failCount = Integer.getInteger(property);
            count = (failCount == null) ? 0 : failCount.intValue();
        }

        if (count > 0 && --count == 0) {
            System.exit(1);
        }

        //Logger.getDefault().log(property + ": : count is " + count);
        return count;
    }
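
    /* A testing sketch: running the JVM with
     *
     *   -Dorg.netbeans.mdr.persistence.btreeimpl.btreestorage.FileCache.commitFailure=3
     *
     * makes the process exit during the third commit, so the next open can
     * exercise crash recovery.  flushFailure works the same way for page
     * flushes.
     */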

    /* extend the cache */
    private static void addPages(int numToAdd) {
        for (int i = 0; i < numToAdd; i++) {
            CachedPage page = new CachedPage(pageSize);
            pages.add(page);
            freePages.addLast(page);
        }
        // Logger.getDefault().log("Cache is now " + pages.size() + " pages");
    }
        
    /** A callback used by the logging system to indicate that a modified
    * page cannot be written until the log file is flushed.
    * @param page the page which is being held.
    */
    void holdForLog(CachedPage page) {
        page.heldForLog = true;
        heldForLog.put(page.key, page);
    }

    /** A callback used by the logging system to indicate that the
    * log file was flushed.
    */
    void logWasFlushed() {
        Iterator itr = heldForLog.values().iterator();
        while (itr.hasNext()) {
            CachedPage page = (CachedPage)itr.next();
            page.heldForLog = false;
            if (page.getPinCount() == 0)
                freePages.addFirst(page);
        }
        heldForLog.clear();
        // Logger.getDefault().log("Log file flushed");
    }
        
    /** Create the cache and open the files.  The files are assumed
    * already to exist and have valid, identical file headers.
    * @param pgSize the cache's page size
    * @param numBufs the number of page buffers to create
    * @param names the files to access via the cache
    * @param logName the name of the log file
    * @exception StorageException I/O error opening or reading the files
    * @exception BadParameterException if the files do not have identical file
    * headers, or the log file exists but is not consistent with the files
    * @exception ConsistencyException if the log file exists and is corrupted
    */
    public FileCache(String names[], String logName) 
            throws StorageException {

        boolean failure = true;
        try {
            try {
                files = new RandomAccessFile[names.length];
                fileSize = new int[names.length];
                fileNames = new String[names.length];
                System.arraycopy(names, 0, fileNames, 0, names.length);

                FileHeader tmpHeader;

                for (int i = 0; i < fileNames.length; i++) {
                    files[i] = new RandomAccessFile(fileNames[i], "rw");
                }
                tmpHeader = new FileHeader(files[0]);

                log = new LogFile(
                    this, logName, BtreeDatabase.PAGE_SIZE, fileNames.length, tmpHeader.fileId);

                for (int i = 0; i < fileNames.length; i++) {
                    fileSize[i] = (int)files[i].length();
                    FileHeader hdr = new FileHeader(files[i]);

                    // Note: we can't check that headers are consistent until
                    // after recovery, since before that point the files may
                    // have inconsistent timestamps
                    if (i == 0) {
                        header = hdr;
                    }
                    else if (!hdr.equals(header)) {
                        throw new StoragePersistentDataException(
                                        "Files are not consistent");
                    }
                }

                heldForLog = new HashMap();
                failure = false;
                instances.add(this);
            }
            finally {
                if (failure) {
                    if (files != null) {
                        for (int i = 0; i < files.length; i++) {
                            if (files[i] != null) { 
                                files[i].close();
                            }
                        }
                    }
                    if (log != null)
                        log.close();
                }
            }
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }

    }

    /** return the array of open files
    */
    RandomAccessFile[] getFiles() {
        return files;
    }

    /** close all files without committing
    * @exception StorageException I/O error closing the files
    */
    public synchronized void abort() throws StorageException {
        closeFiles();
    }

    /** commit all changes and close all cached files
    * @exception StorageException I/O error closing the files
    */
    public synchronized void close() throws StorageException {
        commit();
        closeFiles();
    }

    /* close all files */
    private void closeFiles() throws StorageException {
        try {
            for (int i = 0; i < files.length; i++)
                files[i].close();
            log.close();
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        } finally {
            instances.remove(this);
        }
    }

    /** commit all changes 
    * @exception StorageException I/O error writing the files
    */
    public synchronized void commit() throws StorageException {
        commitFailure = checkForForcedFailure(
            "org.netbeans.mdr.persistence.btreeimpl.btreestorage.FileCache.commitFailure",
            commitFailure);

        if (toNotify != null) {
            Iterator itr = toNotify.iterator();
            while (itr.hasNext()) {
                NotifyOnCommit obj = (NotifyOnCommit)itr.next();
                obj.prepareToCommit();
            }
        }

        if (inXact) {
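
            /* Commit protocol: stamp each file header with the new
             * transaction time stamp, force the log to disk before any
             * data page is written back, flush every dirty page this
             * cache owns, then mark the log committed. */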

            /* update timestamps */
            for (int i = 0; i < files.length; i++) {
                CachedPage first = getPage(i, 0);
                setWritable(first);
                FileHeader.updateTime(first, newTimeStamp);
                first.unpin();
            }
            log.flush();

            // Flush all cache buffers.
            Iterator itr = pages.iterator();
            while (itr.hasNext()) {
                CachedPage page = (CachedPage)itr.next();
                if (page.isDirty && page.getOwner() == this)
                    flushOne(page);
            }
            log.commit();
            header.timeStamp = newTimeStamp;
            inXact = false;
        }
    }

        
    /* write a dirty page to the disk */
    private static void flushOne(CachedPage page) throws StorageException{
        try {
            flushFailure = checkForForcedFailure(
                "org.netbeans.mdr.persistence.btreeimpl.btreestorage.FileCache.flushFailure", 
                flushFailure);
            FileCache owner = page.getOwner();
            assert owner != null;
            RandomAccessFile file = owner.files[page.key.fileIndex];
            file.seek(page.key.offset);
            file.write(page.contents);
            page.isDirty = false;
            pagesFlushed++;
            if (page.key.offset >= owner.fileSize[page.key.fileIndex]) {
                owner.fileSize[page.key.fileIndex] = page.key.offset + pageSize;
            }
        }
        catch (IOException ex) {
            throw new StorageIOException(ex);
        }
    }

    /** unpin a set of pages.  Until unpinned the same number of times
    * that they have been pinned, pages cannot be released from the cache.
    * @param pages the pages to unpin
    * @exception BadParameterException if any of the pages are not pinned
    */
    public synchronized void unpin(CachedPage pages[]) 
        throws StorageException {
        for (int i = 0; i < pages.length; i++)
            unpin(pages[i]);
    }

    /** unpin a page.  Until unpinned the same number of times it has 
    * been pinned, a page cannot be released from the cache.
    * @param page the page to unpin
    * @exception BadParameterException if the page is not pinned
    */
    public synchronized void unpin(CachedPage page) 
                        throws StorageException {
        if (page.getPinCount() <= 0) {
            throw new StorageTransientDataException(
                        "Attempt to unpin page which is not pinned");
        }

        if ((page.innerUnpin() == 0) && !page.heldForLog) {
            freePages.addFirst(page);
        }

    }

    /** Get the pages which contain the desired bytes from the file.
    * This implicitly pins these pages.  Note that these pages may extend
    * past the current EOF; that is, this routine may extend the file.
    * @param fileidx the index of the file containing the pages
    * @param first  the number of the first page to get
    * @param size the number of pages to get
    * @return the array of pages requested.
    * @exception StorageException I/O error reading the pages
    */
    public synchronized CachedPage[] getPages(int fileidx, int first, int size) 
        throws StorageException {

        CachedPage retval[] = new CachedPage[size];
        for (int i = 0 ; i < size; i++) {
            retval[i] = 
                getPage(this, new PageID(fileidx, pageSize * (first + i)), false);
        }

        return retval;
    }

    /** Get the single page at the desired offset into the file.
    * This implicitly pins that page.  Note that the page may lie
    * past the current EOF; that is, this routine may extend the file.
    * @param fileidx the index of the file containing the page.
    * @param pageNum the page number to get
    * @return the page requested
    * @exception StorageException I/O error reading the page
    */
    public synchronized CachedPage getPage(int fileidx, int pageNum) 
                                                    throws StorageException {
        return getPage(this, new PageID(fileidx, pageNum * pageSize), false);
    }

    /** Get the single page at the desired offset into the file.
    * This implicitly pins that page.  Note that the page may lie
    * past the current EOF; that is, this routine may extend the file.
    * @param page the PageID describing the desired page
    * @return the page requested
    * @exception StorageException I/O error reading the page
    */
    synchronized CachedPage getPage(PageID page) throws StorageException {
        return getPage(this, page, false);
    }
    
    private static class HashKey {
        public final FileCache owner;
        public final PageID id;
        
        public HashKey(FileCache owner, PageID id) {
            this.owner = owner;
            this.id = id;
        }
        
        public boolean equals(Object o) {
            return o == this || ((o instanceof HashKey) && (((HashKey) o).owner == owner) && (((HashKey) o).id.equals(id)));
        }
        
        public int hashCode() {
            return owner.hashCode() + 31 * id.hashCode();
        }
    }

    /** Get the single page at the desired offset into the file.
    * This implicitly pins that page.  Note that the page may lie
    * past the current EOF; that is, this routine may extend the file.
    * @param instance the cache requesting the page
    * @param id the PageID describing the desired page
    * @param fromCacheOnly if true, only look inside the cache
    * @return the page requested
    * @exception StorageException I/O error reading the page
    */
    private static CachedPage getPage(FileCache instance, PageID id, boolean fromCacheOnly) 
            throws StorageException {
        HashKey key = new HashKey(instance, id);
        CachedPage page = (CachedPage)pageHash.get(key);
        if (page != null)
        {
            if (page.pin(instance) == 0 && !page.heldForLog)
                freePages.remove(page);
            hits++;
            return page;
        }
        else if (fromCacheOnly) {
            return null;
        }

        // Find a free page
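        // Unpinned pages are pushed onto the front of freePages, so
        // removeLast() reclaims the least recently used one; if none is
        // free, flushing the log may release held pages, and as a last
        // resort the cache is grown by half.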
        CachedPage free = (CachedPage)freePages.removeLast();
        if (free == null) {
            /* if there are any waiting for the log to be flushed, flush it */
            for (Iterator it = instances.iterator(); it.hasNext();) {
                FileCache cache = (FileCache) it.next();
                if (!cache.heldForLog.isEmpty())
                {
                    cache.log.flush();
                    cache.logFlushes++;
                }
            }
            free = (CachedPage)freePages.removeLast();
        }

        if (free == null) {
            // cache is full -- make it half as big again
            int increment = (pages.size() + 1) / 2;
            addPages(increment);
            extensions++;
            free = (CachedPage)freePages.removeLast();
        }

        if (free.isDirty) {
            flushOne(free);
        }
            
        if (free.key != null && free.getOwner() != null) {
            pageHash.remove(new HashKey(free.getOwner(), free.key));
        }

        free.reInit(instance, id);
        pageHash.put(key, free);
        if (id.offset >= instance.fileSize[id.fileIndex]) {
            Arrays.fill(free.contents, (byte)0);
        } 
        else {
            try {
                instance.files[id.fileIndex].seek(id.offset);
                instance.files[id.fileIndex].readFully(free.contents);
            }
            catch (IOException ex) {
                throw new StorageIOException(ex);
            }
        }

        free.pin(instance);
        misses++;
        return free;
    }

    /** Make the specified page writable.  If it was not writable previously, 
    * this causes it to be logged.  This must be called before the page
    * is modified.  If the cache is not currently in a transaction,
    * this implicitly begins one.
    * @param page The page to be made writable.
    * @exception StorageException I/O error logging the page
    */
    public synchronized void setWritable(CachedPage page) 
        throws StorageException{
        if (page.isDirty)
            return;

        if (!inXact) { 
            newTimeStamp = System.currentTimeMillis();
            log.begin(files, header.timeStamp, newTimeStamp);
            inXact = true;
        }
        log.addPageToLog(page);
        page.isDirty = true;
    }

    /** Make the specified pages writable. If any were not writable previously,
    * this causes them to be logged.  This must be called before the pages are
    * modified.
    * @param pages The pages to be made writable.
    * @exception StorageException I/O error logging the pages
    */
    public synchronized void setWritable(CachedPage pages[]) 
        throws StorageException{
        for (int i = 0; i < pages.length; i++)
            setWritable(pages[i]);
    }

    /**
    * for debugging
    */
    public void dumpCache(PrintStream strm) {
        strm.println("Cached files:");
        for (int i = 0; i < fileNames.length; i++) {
            strm.println(
                Integer.toString(i) + ": " + fileNames[i] +
                " size: " + fileSize[i]);
        }
        strm.println("");
                

        strm.println(Integer.toString(pages.size()) + " pages");
        Iterator itr = pages.iterator();
        int num = 0;
        while (itr.hasNext()) {
            strm.println(Integer.toString(num++) + ":");
            strm.print(((CachedPage)itr.next()).toString());
        }
    }

    /**
    * Show caching statistics
    */
    public void showStats(PrintStream strm) {
        showStats(new PrintWriter(strm));
    }

    /**
    * Show caching statistics
    */
    public void showStats(PrintWriter strm) {
        int pinned = 0;
        int dirty = 0;
        int held = 0;
        for (int i = 0; i < pages.size(); i++) {
            CachedPage pg = (CachedPage)pages.get(i);
            if (pg.getPinCount() > 0) {
                pinned++;
//              strm.println("Pinned page "+ pg.key + " count: " + pg.pinCount);
            }
            if (pg.isDirty)
                dirty++;
            if (pg.heldForLog)
                held++;
        }
        strm.println("Page counts: total = " + pages.size() + " pinned = " +
                pinned + " dirty = " + dirty + " held = " + held);
        strm.println(
            "Cache hits: " + hits + " misses: " + misses +
            " hit rate: " + 100. * (float)hits / (float)(hits + misses));
        strm.println(pagesFlushed + " pages written");
        strm.println("Log file flushed to free pages " + logFlushes + " times");
        strm.println("Cache made bigger " + extensions + " times");
        strm.flush();
    }

    /**
    * get page size
    */
    int getPageSize() {
        return pageSize;
    }

    /**
    * Add to the list of objects to be notified before commit
    * @param notified the object to add to the list
    */
    public synchronized void addNotifier(NotifyOnCommit notified) {
        if (toNotify == null) {
            toNotify = new ArrayList();
        }
        toNotify.add(notified);
    }

    /** 
    * An object which needs to be notified before the cache commits (for
    * instance, to write any changes to the cache before the cache is
    * flushed to disk) implements this interface, and calls addNotifier
    * on the cache.
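    *
    * <p>A hypothetical implementor:
    * <pre>
    *   class DirtyObjectTable implements FileCache.NotifyOnCommit {
    *       public void prepareToCommit() throws StorageException {
    *           // push pending in-memory changes into cached pages here
    *       }
    *   }
    *   cache.addNotifier(new DirtyObjectTable());
    * </pre>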
    */
    public interface NotifyOnCommit {

        /** a callback method called before the cache commits.  If
        * it throws an exception, the commit does not take place.
        */
        void prepareToCommit() throws StorageException;
    }

}
