
What this is

This file is included in the DevDaily.com "Java Source Code Warehouse" project. The intent of this project is to help you "Learn Java by Example"™.

The source code

/*
 *                 Sun Public License Notice
 * 
 * The contents of this file are subject to the Sun Public License
 * Version 1.0 (the "License"). You may not use this file except in
 * compliance with the License. A copy of the License is available at
 * http://www.sun.com/
 * 
 * The Original Code is NetBeans. The Initial Developer of the Original
 * Code is Sun Microsystems, Inc. Portions Copyright 1997-2001 Sun
 * Microsystems, Inc. All Rights Reserved.
 */
package org.netbeans.mdr.persistence.btreeimpl.btreestorage;

import java.io.*;
import java.util.*;
import java.util.zip.*;

import org.netbeans.mdr.persistence.*;
import org.netbeans.mdr.util.Logger;

/**
* This is the superclass for extents which are parts of records
*/
abstract class ActiveBtreeExtent extends BtreeExtent {

    /** How much data this extent contains */
    int dataLength;

    /** The offset within the extent at which the data starts */
    int dataStart;

    /** Convert a deleted extent to an active one.  The deleted extent has
    * already been removed from its chain
    * @param del the extent to convert
    */
    ActiveBtreeExtent(DeletedBtreeExtent del) {
        super(del);
        headerIsDirty = true;
    }


    /** called by subclasses to initialize an ActiveBtreeExtent
    * @param file the BtreeDataFile this extent will belong to
    * @param chunkNum where this extent begins
    * @param numChunks the size of the extent
    */
    ActiveBtreeExtent(
        BtreeDataFile file, int chunkNum, short numChunks) {

        super(file, chunkNum, numChunks);
    }

    /** get the amount of data contained in this extent 
    * @return amount of data
    */
    abstract int getMyDataLength();

    /** set the amount of data contained in this extent
    * @param length amount of data
    * @return the data length actually recorded (subclasses may clip it
    * to what the extent can hold)
    */
    abstract int setMyDataLength(int length);

    /** get how much data this extent could contain
    * @return maximum amount of data which would fit 
    */
    abstract int getAvailableDataLength();

    /** write this extent's data to the cache.  The data is supplied in
    * a separate buffer; the extent describes where in the cache to write it.
    * @param dataBuffer data to write
    * @param dataOffset where in the buffer to begin reading from
    */
    void writeData(byte dataBuffer[], int dataOffset) 
            throws StorageException {
        int toCopy = dataLength;
        IntHolder offst = new IntHolder();
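        // Ceiling division: the data occupies bytes [dataStart, dataStart + toCopy)
        // of the extent, so it spans ceil((dataStart + toCopy) / BTREE_CHUNK_SIZE) chunks.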
        int numChunks = 
            (dataStart + toCopy - 1) / BtreeDataFile.BTREE_CHUNK_SIZE + 1;
        
        if (numChunks > chunks) {
            StorageException se = new StoragePersistentDataException("Number of chunks does not match.");
            Logger.getDefault().annotate(se, "Bad number of chunks: ----------------------");
            Logger.getDefault().annotate(se, "start chunk number: " + myChunkNum);
            Logger.getDefault().annotate(se, "#chunks: " + chunks + " computed #chunks: " + numChunks);
            Logger.getDefault().annotate(se, "dataLength: " + dataLength + " dataStart: " + dataStart);
            throw se;
        }
        
        CachedPage pages[] = owner.getChunks(myChunkNum, numChunks, offst);
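        // getChunks pins the returned pages; the finally block below
        // unpins every one of them once the copy completes.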
        try {
            int pageNum = 0;
            int pageSize = pages[0].contents.length;
            int offset = offst.getValue() + dataStart;
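            // The first data byte may not be on pages[0]: step page by page
            // until the running offset falls within a single page.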
            while (offset >= pageSize) {
                pageNum++;
                offset -= pageSize;
            }
            while (toCopy > 0) {
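                // Copy as much as fits in the remainder of the current page.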
                int thisPage = Math.min(pageSize - offset, toCopy);
                pages[pageNum].setWritable();
                System.arraycopy(dataBuffer, dataOffset, 
                                 pages[pageNum].contents, offset, thisPage);
                dataOffset += thisPage;
                toCopy -= thisPage;
                pageNum++;
                offset = 0;
            }
        }
        finally {
            for (int i = 0; i < pages.length; i++) {
                pages[i].unpin();
            }
        }
    }

    /** Add the data described by this extent to a CachedPageInputStream.
    * The pages of data are already in the cache
    * @param strm stream to add pages to.
    */
    void addToStream(CachedPageInputStream strm) throws StorageException {
        IntHolder offst = new IntHolder();
        int toAppend = getMyDataLength();
        int numChunks = 
            (dataStart + toAppend - 1) / BtreeDataFile.BTREE_CHUNK_SIZE + 1;

        // Validate before pinning any pages (as writeData does); otherwise
        // the pages fetched below would never be unpinned on this error path.
        if (numChunks > chunks) {
            StorageException se = new StoragePersistentDataException("Number of chunks does not match.");
            Logger.getDefault().annotate(se, "Bad number of chunks: ----------------------");
            Logger.getDefault().annotate(se, "start chunk number: " + myChunkNum);
            Logger.getDefault().annotate(se, "#chunks: " + chunks + " computed #chunks: " + numChunks);
            Logger.getDefault().annotate(se, "dataLength: " + dataLength + " dataStart: " + dataStart);
            throw se;
        }

        CachedPage[] pages = owner.getChunks(myChunkNum, numChunks, offst);

        int pageNum = 0;
        int pageSize = pages[0].contents.length;
        int offset = offst.getValue() + dataStart;
        while (offset >= pageSize) {
            pageNum++;
            offset -= pageSize;
        }

        // Pages handed to the stream stay pinned; the stream releases them
        // when it is closed.
        for (; pageNum < pages.length && toAppend > 0; pageNum++) {
            int thisPage = Math.min(pageSize - offset, toAppend);
            strm.addPage(pages[pageNum], offset, thisPage);
            offset = 0;
            toAppend -= thisPage;
        }

        // Unpin any trailing pages that were fetched but not added to the stream.
        for (; pageNum < pages.length; pageNum++) {
            pages[pageNum].unpin();
        }
    }


    /** Get CRC of record's data
    * @return CRC
    */
    long getCRC() throws StorageException {
        CachedPageInputStream dstrm = new CachedPageInputStream();
        CheckedInputStream cis = null;
        try {
            try {
                addToStream(dstrm);
                cis = new CheckedInputStream(dstrm, new CRC32());
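                // Drain the stream; the CheckedInputStream updates its CRC32
                // with each byte read.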
                while (cis.read() >= 0)
                    ;
                return cis.getChecksum().getValue();
            }
            finally {
                if (cis != null)
                    cis.close();
                else
                    dstrm.close();
            }
        }
        catch (IOException exc) {
            throw new StorageIOException(exc);
        }
    }

    /** is the extent already full of data
    * @return true if the extent has no room for more data
    */
    abstract boolean isMaximum();

    /** dump extent as text (for debugging)
    * @param level bitmask of what to dump.  See the superclass for the
    * meaning of the levels.
    * @param strm where to dump it to
    */
    void dump(int level, PrintWriter strm) throws StorageException {
        super.dump(level, strm);
        boolean dumpData = (level & DUMP_DATA) != 0;
        boolean showCheckSum = (level & DUMP_DATA_CHECKSUM) != 0;

        strm.println(dataLength + " data bytes");
        if (dumpData) {
            CachedPageInputStream dstrm = new CachedPageInputStream();
            try {
                try {
                    addToStream(dstrm);
                    dumpBytesAsHex(dstrm, strm, "\t");
                    strm.println();
                }
                finally {
                    dstrm.close();
                }
            }
            catch (IOException exc) {
                throw new StorageIOException(exc);
            }
        }

        if (showCheckSum) {
            strm.println("Data checksum: " + getCRC());
            strm.println();
        }
    }
}
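
A quick illustration

The chunk and page arithmetic above is easy to lose in the surrounding cache code, so here is a small, self-contained sketch of the same bookkeeping. The CHUNK_SIZE and PAGE_SIZE constants and the starting offsets below are illustrative stand-ins, not the actual BtreeDataFile or cache values.

public class ExtentMathDemo {
    // Illustrative stand-ins; the real values come from BtreeDataFile
    // and the page cache configuration.
    static final int CHUNK_SIZE = 32;   // bytes per btree chunk (assumed)
    static final int PAGE_SIZE = 4096;  // bytes per cache page (assumed)

    public static void main(String[] args) {
        int dataStart = 24;     // offset of the data within the extent (assumed)
        int dataLength = 5000;  // bytes of data in the extent (assumed)

        // Same ceiling division used by writeData and addToStream: the data
        // occupies bytes [dataStart, dataStart + dataLength) of the extent,
        // so it spans ceil((dataStart + dataLength) / CHUNK_SIZE) chunks.
        int numChunks = (dataStart + dataLength - 1) / CHUNK_SIZE + 1;

        // getChunks reports where the extent's first chunk begins within
        // the first returned page; assume 64 here for the demo.
        int chunkOffsetInPage = 64;

        // Walk to the page holding the first data byte, exactly as the
        // while loop in writeData does.
        int pageNum = 0;
        int offset = chunkOffsetInPage + dataStart;
        while (offset >= PAGE_SIZE) {
            pageNum++;
            offset -= PAGE_SIZE;
        }

        System.out.println("chunks spanned: " + numChunks);
        System.out.println("first data byte: page " + pageNum + ", offset " + offset);
    }
}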