
What this is

This file is included in the DevDaily.com "Java Source Code Warehouse" project. The intent of this project is to help you "Learn Java by Example"™.

The source code

/*
 *                 Sun Public License Notice
 * 
 * The contents of this file are subject to the Sun Public License
 * Version 1.0 (the "License"). You may not use this file except in
 * compliance with the License. A copy of the License is available at
 * http://www.sun.com/
 * 
 * The Original Code is NetBeans. The Initial Developer of the Original
 * Code is Sun Microsystems, Inc. Portions Copyright 1997-2001 Sun
 * Microsystems, Inc. All Rights Reserved.
 */
package org.netbeans.mdr.persistence.btreeimpl.btreestorage;

import java.io.*;
import java.text.*;
import java.util.*;

import org.netbeans.mdr.persistence.*;
import org.netbeans.mdr.persistence.btreeimpl.btreeindex.*;

/**
 * This class represents the Btree data file.  This file is divided into chunks
 * of BTREE_CHUNK_SIZE bytes each.  It begins with a header of size
 * BTREE_HEADER_SIZE bytes (an even number of chunks) followed by a set of
 * variable-sized extents (each of which is also an even number of chunks).
 * A record, which contains a key plus data, consists of one or more extents.
 * If there is more than one, each extent other than the last points to its
 * successor.  There are also extents representing records which have been
 * deleted.  The data file header points to chains of deleted extents organized
 * by size, allowing their efficient reuse.  No attempt is made to coalesce
 * deleted extents.
 * <p>
 * An extent is created slightly bigger than necessary to contain its record,
 * to allow the record to grow subsequently without requiring a new extent
 * to be allocated for it.
 * <p>
 * Data file header disk format:
 * <ol>
 * <li> bytes 0-63      FileHeader
 * <li> bytes 64-67     MAGIC
 * <li> bytes 68-71     eof chunk number
 * <li> bytes 72-75     version
 * <li> bytes 76-113    repository UUID string
 * <li> bytes 114-117   first deleted extent header of size 1 chunk
 * <li> bytes 118-121   first deleted extent header of size 2 chunks
 * <li> ...
 * <li> bytes 1134-1137 first deleted extent header of size 256 chunks
 * <li> bytes 1138-1141 number of objects currently in repository
 * <li> bytes 1142-1149 persistent counter
 * </ol>
 * Note that no extents larger than 256 (MAX_CHUNKS_IN_EXTENT) chunks are
 * created.
 *
 * @see BtreeExtent
 */
class BtreeDataFile implements FileCache.NotifyOnCommit, MofidGenerator {

    /** The btree data file is divided into chunks of this size.  A file
     * cache page must be an even number of chunks.  Value is 32. */
    static final int BTREE_CHUNK_SIZE = 32;

    /** the current (and, so far, only) version of btree.  Value is 100. */
    static final int BTREE_VERSION = 100;

    /** the size of the data file header in bytes.  This must be an even
     * number of chunks.  Value is 2048. */
    static final int BTREE_HEADER_SIZE = 2048;

    /** Maximum number of chunks in a single extent.  Value is 256. */
    static final int MAX_CHUNKS_IN_EXTENT = 256;

    /** Maximum bytes in a single extent */
    static final int MAX_BYTES_IN_EXTENT = MAX_CHUNKS_IN_EXTENT * BTREE_CHUNK_SIZE;

    /* magic number identifying btree data file header */
    private static final int MAGIC = 0xFEEDBEEF;

    /* the file cache from which we read pages */
    private FileCache cache;

    /* The index of the data file within the cache */
    private int fileIndex;

    /* The cache page size */
    private int pageSize;

    /* The number of chunks in a page */
    private int chunksPerPage;

    /* header magic number */
    private int magic;

    /* version of Btree in file */
    private int version;

    /* UUID of data file */
    private String UUID;

    /* persistent counter */
    private long counter;

    /* File header (only used when creating file) */
    private FileHeader fileHeader;

    /* Number of chunks in file */
    private int eof;

    /* Array of chains of deleted extents */
    private int deletedExtents[];

    /* true when header is different in memory than on disk */
    private boolean dirty = false;

    /* number of objects currently in repository */
    private int numberOfObjects;

    /* The number of changes made to the data file since it was last opened */
    int modificationLevel;

    /* buffer for MOFID conversion.  Any method which uses this *must* be
     * synchronized! */
    private byte mofid[] = new byte[MOFID.LENGTH];

    /** BtreeStorage */
    private BtreeStorage storage;

    /** Create a BtreeDataFile by reading a data file from a FileCache
     * @param theCache the file cache
     * @param index index of the data file in the cache
     * @exception StorageException if an I/O error occurs or btree detects
     * an inconsistency
     */
    BtreeDataFile(BtreeStorage storage, FileCache theCache, int index)
            throws StorageException {
        this.storage = storage;
        cache = theCache;
        cache.addNotifier(this);
        fileIndex = index;
        pageSize = cache.getPageSize();
        checkFit(pageSize);
        chunksPerPage = pageSize / BTREE_CHUNK_SIZE;
        fileHeader = null;
        deletedExtents = new int[MAX_CHUNKS_IN_EXTENT];

        IntHolder offset = new IntHolder();
        CachedPage page = getChunk(0, offset);
        try {
            offset.setValue(offset.getValue() + FileHeader.HEADER_SIZE);
            magic = Converter.readInt(page.contents, offset);
            eof = Converter.readInt(page.contents, offset);
            version = Converter.readInt(page.contents, offset);
            UUID = Converter.readString(page.contents, offset);
            if (magic != MAGIC) {
                throw new StoragePersistentDataException(
                    "Bad magic number in data file");
            }
            if (version != BTREE_VERSION) {
                throw new StoragePersistentDataException(
                    "Bad version number in data file");
            }
            for (int i = 0; i < MAX_CHUNKS_IN_EXTENT; i++) {
                deletedExtents[i] = Converter.readInt(page.contents, offset);
            }
            numberOfObjects = Converter.readInt(page.contents, offset);
            counter = Converter.readLong(page.contents, offset);
        } finally {
            page.unpin();
        }
        dirty = false;
    }
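    /* Editor's note (not in the original source): the constructor above reads
     * the header fields in exactly the order given by the class comment.  As
     * a worked check of that layout, the chain head for deleted extents of
     * size n chunks lives at byte offset 114 + 4 * (n - 1), so size 1 starts
     * at byte 114 and size 256 (MAX_CHUNKS_IN_EXTENT) starts at
     * 114 + 4 * 255 = 1134, matching the "bytes 1134-1137" entry above.
     */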
    /** Create a BtreeDataFile in memory.  This is only used when creating a
     * data file on disk.
     * @param hdr FileHeader to write to disk
     * @param vers version of Btree data file to create
     */
    private BtreeDataFile(BtreeStorage storage, FileHeader hdr, int vers) {
        this.storage = storage;
        magic = MAGIC;
        version = vers;
        eof = BTREE_HEADER_SIZE / BTREE_CHUNK_SIZE;
        // adding a random increment to fix #46323
        counter = BtreeFactory.FIRST_EXTERNAL_ID + new Random().nextInt(4096);
        if (storage.storageUUID != null)
            UUID = storage.storageUUID;
        else
            UUID = new UUID().toString();
        fileHeader = hdr;
        deletedExtents = new int[MAX_CHUNKS_IN_EXTENT];
        cache = null;
        fileIndex = -1;
        pageSize = 0;
        chunksPerPage = 0;
    }

    /** Create a btree data file
     * @param fileName name of file to create
     * @param hdr FileHeader to write to disk
     * @param replace if true, overwrite an existing file
     * @param pgSize the page size with which the file will be accessed
     * @exception StorageException if the file exists and replace is false,
     * or if an I/O error occurs
     */
    static void create(BtreeStorage storage, String fileName, FileHeader hdr,
                       int pgSize, boolean replace) throws StorageException {
        checkFit(pgSize);
        File old = new File(fileName);
        if (old.exists()) {
            if (replace) {
                old.delete();
            } else {
                throw new StorageBadRequestException(
                    MessageFormat.format("File {0} exists",
                                         new Object[] {fileName}));
            }
        }

        BtreeDataFile dummy = new BtreeDataFile(storage, hdr, BTREE_VERSION);
        byte buffer[] = new byte[pgSize];
        dummy.writeHeader(buffer, true);
        try {
            RandomAccessFile file = new RandomAccessFile(fileName, "rw");
            file.write(buffer);
            file.close();
        } catch (IOException ex) {
            throw new StorageIOException(ex);
        }
    }

    /** Read a record from the data file
     * @param chunkNum the number of the chunk where the record starts
     * @param key the record's key
     * @return a stream from which the record can be read.  The stream must
     * be closed when it is no longer needed to allow the pages to be
     * released from the cache.
     * @exception StorageException if btree detects an inconsistency
     * or if an I/O error occurs
     */
    synchronized CachedPageInputStream get(int chunkNum, long key)
            throws StorageException {
        Converter.writeLong(mofid, 0, key);
        return get(chunkNum, mofid);
    }

    /** Read a record from the data file
     * @param chunkNum the number of the chunk where the record starts
     * @param key the record's key
     * @return a stream from which the record can be read.  The stream must
     * be closed when it is no longer needed to allow the pages to be
     * released from the cache.
     * @exception StorageException if btree detects an inconsistency
     * or if an I/O error occurs
     */
    synchronized CachedPageInputStream get(int chunkNum, byte[] key)
            throws StorageException {
        return getStream(checkRecord(chunkNum, key));
    }

    /* create a stream from a normal extent and all of its continuations */
    private CachedPageInputStream getStream(NormalBtreeExtent ext)
            throws StorageException {
        CachedPageInputStream strm = new CachedPageInputStream();
        ext.addToStream(strm);
        int next = ext.getNext();
        while (next != 0) {
            ContinuationBtreeExtent cext =
                (ContinuationBtreeExtent)getExtent(next);
            cext.addToStream(strm);
            next = cext.getNext();
        }
        return strm;
    }
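    /* Editor's note (not in the original source): a record is stored as one
     * NormalBtreeExtent (which holds the key) followed by zero or more
     * ContinuationBtreeExtents, each linked to the next via setNext()/getNext().
     * getStream() above walks that chain; put() below builds it.
     */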
    /** Write a new record to the file.  Duplicate keys are not checked.
     * If you wish to replace an existing record which has the same key,
     * call replace instead.
     * @param key the record's key
     * @param record the bytes which make up the record.  There is effectively
     * no limit to the record length.
     * @return the chunk number of the first extent created
     * @exception StorageException if btree detects an inconsistency
     * or if an I/O error occurs
     */
    synchronized int put(long key, byte record[]) throws StorageException {
        Converter.writeLong(mofid, 0, key);
        return put(mofid, record);
    }

    /** Write a new record to the file.  Duplicate keys are not checked.
     * If you wish to replace an existing record which has the same key,
     * call replace instead.
     * @param key the record's key
     * @param record the bytes which make up the record.  There is effectively
     * no limit to the record length.
     * @return the chunk number of the first extent created
     * @exception StorageException if btree detects an inconsistency
     * or if an I/O error occurs
     */
    synchronized int put(byte[] key, byte record[]) throws StorageException {
        modificationLevel++;
        NormalBtreeExtent first = getNormalExtent(record.length, key, null);
        ActiveBtreeExtent previous = first;
        first.writeData(record, 0);
        int offset = first.getMyDataLength();
        int toAllocate = record.length - offset;
        while (toAllocate > 0) {
            ContinuationBtreeExtent cext =
                getContinuationExtent(toAllocate, null);
            cext.writeData(record, offset);
            previous.setNext(cext);
            previous.writeHeader();
            previous = cext;
            int written = cext.getMyDataLength();
            offset += written;
            toAllocate -= written;
        }
        previous.setNext(0);
        previous.writeHeader();
        numberOfObjects++;
        return first.getOffset();
    }

    /** Replace an existing record in the file.  This replaces only the bytes
     * in the record: the key does not change.  The size of the record may
     * change.  If possible, the existing extent(s) will be reused.
     * @param key the existing record's key
     * @param record the bytes which will make up the new version of the
     * record.  There is effectively no limit to the record length.
     * @return the new chunk number of the first extent
     * @exception StorageException if btree detects an inconsistency
     * or if an I/O error occurs
     */
    synchronized int replace(int chunkNum, long key, byte record[])
            throws StorageException {
        Converter.writeLong(mofid, 0, key);
        return replace(chunkNum, mofid, record);
    }
    /** Replace an existing record in the file.  This replaces only the bytes
     * in the record: the key does not change.  The size of the record may
     * change.  If possible, the existing extent(s) will be reused.
     * @param key the existing record's key
     * @param record the bytes which will make up the new version of the
     * record.  There is effectively no limit to the record length.
     * @return the new chunk number of the first extent
     * @exception StorageException if btree detects an inconsistency
     * or if an I/O error occurs
     */
    synchronized int replace(int chunkNum, byte key[], byte record[])
            throws StorageException {
        modificationLevel++;
        NormalBtreeExtent exist = checkRecord(chunkNum, key);
        ArrayList conts = null;
        int next = exist.getNext();
        if (next > 0) {
            conts = new ArrayList();
            while (next > 0) {
                ContinuationBtreeExtent ext =
                    (ContinuationBtreeExtent)getExtent(next);
                conts.add(ext);
                next = ext.getNext();
            }
        }

        NormalBtreeExtent first = getNormalExtent(record.length, key, exist);
        ActiveBtreeExtent previous = first;
        first.writeData(record, 0);
        if (exist != first) {
            DeletedBtreeExtent dext = new DeletedBtreeExtent(exist);
            dext.writeHeader();
        }
        int offset = first.getMyDataLength();
        int toAllocate = record.length - offset;
        int contIndex = 0;
        while (toAllocate > 0) {
            ContinuationBtreeExtent candidate = null;
            if (conts != null && contIndex < conts.size()) {
                candidate = (ContinuationBtreeExtent)conts.get(contIndex);
            }
            ContinuationBtreeExtent cext =
                getContinuationExtent(toAllocate, candidate);
            if (cext == candidate) {
                contIndex++;
            }
            cext.writeData(record, offset);
            previous.setNext(cext);
            previous.writeHeader();
            previous = cext;
            int written = cext.getMyDataLength();
            offset += written;
            toAllocate -= written;
        }
        if (conts != null) {
            // continuation extents left over from the old version of the
            // record are no longer needed
            for (int i = contIndex; i < conts.size(); i++) {
                DeletedBtreeExtent dext = new DeletedBtreeExtent(
                    (ContinuationBtreeExtent)conts.get(i));
                dext.writeHeader();
            }
        }
        previous.setNext(0);
        previous.writeHeader();
        return first.getOffset();
    }

    /** Remove a record
     * @param chunkNum the chunk number for the record's first extent
     * @param id the record's key
     * @exception StorageException if btree detects an inconsistency
     * or if an I/O error occurs
     */
    synchronized void remove(int chunkNum, long id) throws StorageException {
        Converter.writeLong(mofid, 0, id);
        remove(chunkNum, mofid);
    }

    /** Remove a record
     * @param chunkNum the chunk number for the record's first extent
     * @param key the record's key
     * @exception StorageException if btree detects an inconsistency
     * or if an I/O error occurs
     */
    synchronized void remove(int chunkNum, byte key[]) throws StorageException {
        modificationLevel++;
        NormalBtreeExtent ext = checkRecord(chunkNum, key);
        DeletedBtreeExtent dext = new DeletedBtreeExtent(ext);
        dext.writeHeader();
        int next = ext.getNext();
        while (next != 0) {
            ContinuationBtreeExtent cex =
                (ContinuationBtreeExtent)getExtent(next);
            DeletedBtreeExtent dex = new DeletedBtreeExtent(cex);
            dex.writeHeader();
            next = cex.getNext();
        }
        numberOfObjects--;
    }

    /** get the page size used by the file cache
     * @return page size
     */
    int getPageSize() {
        return pageSize;
    }

    /** get the number of objects currently contained in the repository
     * @return number of objects
     */
    int size() {
        return numberOfObjects;
    }
    /** Add a newly deleted extent to the chains of deleted extents
     * @param extent the newly deleted extent
     */
    void deleteExtent(DeletedBtreeExtent extent) {
        int size = extent.getSize();
        extent.setNext(deletedExtents[size - 1]);
        deletedExtents[size - 1] = extent.getOffset();
        dirty = true;
    }

    /* add in a fudge factor for record size, so a slightly bigger
     * record still fits */
    private int computeMaxExtentSize(int numChunks) {
        // if there is none of the correct size available, use a slightly
        // bigger one
        int extra = Math.min((int)(numChunks * .2), 3);
        return Math.min(numChunks + extra, MAX_CHUNKS_IN_EXTENT);
    }

    /* Get a deleted extent of the same size, or slightly bigger */
    private DeletedBtreeExtent getDeletedExtent(int numChunks)
            throws StorageException {
        for (int i = numChunks; i <= computeMaxExtentSize(numChunks); i++) {
            if (deletedExtents[i - 1] > 0) {
                DeletedBtreeExtent ext =
                    (DeletedBtreeExtent)getExtent(deletedExtents[i - 1]);
                deletedExtents[i - 1] = ext.getNext();
                dirty = true;
                return ext;
            }
        }
        return null;
    }

    /** get a page containing the desired chunk.
     * Note that the returned page is pinned.
     * @param chunkNum the number of the chunk to get
     * @param offset used to return the offset within the page where the
     * chunk begins
     * @return the page containing the chunk
     * @exception StorageException if an I/O error occurs
     */
    CachedPage getChunk(int chunkNum, IntHolder offset)
            throws StorageException {
        int pageNum = chunkNum / chunksPerPage;
        CachedPage page = cache.getPage(fileIndex, pageNum);
        offset.setValue((chunkNum % chunksPerPage) * BTREE_CHUNK_SIZE);
        return page;
    }

    /** get an array of pages containing the desired chunks.
     * Note that the returned pages are pinned.  Note also that the last page
     * returned may contain chunks following the ones requested.
     * @param chunkNum the number of the first chunk to get
     * @param numChunks how many consecutive chunks to read
     * @param offset used to return the offset within the page where the
     * chunk begins
     * @return the pages containing the chunks
     * @exception StorageException if an I/O error occurs
     */
    CachedPage[] getChunks(int chunkNum, int numChunks, IntHolder offset)
            throws StorageException {
        int startPageNum = chunkNum / chunksPerPage;
        int endPageNum = (chunkNum + numChunks - 1) / chunksPerPage;
        CachedPage pages[] = cache.getPages(
            fileIndex, startPageNum, endPageNum - startPageNum + 1);
        offset.setValue((chunkNum % chunksPerPage) * BTREE_CHUNK_SIZE);
        return pages;
    }

    /* Get the extent which begins at the supplied chunk */
    private BtreeExtent getExtent(int chunkNum) throws StorageException {
        return BtreeExtent.readExtent(this, chunkNum);
    }

    /**
     * write the header to disk if it is not up-to-date there.  This
     * is part of the FileCache.NotifyOnCommit interface.
     * @exception StorageException if btree detects an inconsistency
     * or if an I/O error occurs
     */
    public void prepareToCommit() throws StorageException {
        if (!dirty)
            return;

        CachedPage page = cache.getPage(fileIndex, 0);
        try {
            page.setWritable();
            writeHeader(page.contents, false);
        } finally {
            page.unpin();
        }
    }
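    /* Editor's note (not in the original source): deleteExtent() and
     * getDeletedExtent() above maintain MAX_CHUNKS_IN_EXTENT size-indexed
     * free lists threaded through the file.  A minimal sketch of the same
     * push/pop idea, with hypothetical names:
     *
     *     int[] heads = new int[256];  // heads[s-1] = first free extent of s chunks
     *     // push: ext.setNext(heads[size - 1]); heads[size - 1] = ext.getOffset();
     *     // pop:  scan sizes size .. computeMaxExtentSize(size) and unlink the
     *     //       first non-empty chain head found; return null if all are empty.
     */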
    /** Copy to another file.  Since deleted extents are not copied, the
     * copied file is a compressed version of the original.  It is assumed
     * that all changes to this file have been committed, since we iterate
     * through the file to find records to copy.
     * @param target file to copy to.  This should be empty.
     */
    public void copy(BtreeDataFile target) throws StorageException {
        Iterator iter = iterator(ITERATE_NORMAL_EXTENTS);
        while (iter.hasNext()) {
            NormalBtreeExtent ext = (NormalBtreeExtent)iter.next();
            CachedPageInputStream strm = get(ext.myChunkNum, ext.key);
            int offset = 0;
            int len = strm.available();
            byte data[] = new byte[len];
            do {
                int read = strm.read(data, offset, len);
                offset += read;
                len -= read;
            } while (len > 0);
            target.put(ext.key, data);
        }
    }

    /** allocate some number of objects using the persistent counter */
    public synchronized long allocateObjects(int number) {
        counter += number;
        dirty = true;
        return counter;
    }

    /** Get the data file UUID, which will be the prefix of all MOFIDs */
    public String getMofidPrefix() {
        return UUID;
    }

    /** Get a new MOFID */
    public long getNextMofid() {
        return allocateObjects(1);
    }

    /** Get the value of the counter.  Used in the transaction cache
     * to store the state of the MOF id generator (after commit).
     */
    long getMofIdCounter() {
        return counter;
    }

    /** Set the value of the counter.  Used in the transaction cache
     * to perform rollback on the MOF id generator.
     */
    void setMofIdCounter(long counter) {
        this.counter = counter;
        dirty = true;
    }

    /** dump basic header info */
    public static final int DUMP_HEADER = 1;

    /** dump deleted extent chains */
    public static final int DUMP_DELETED_CHAINS = 2;

    /** dump active extent chains */
    public static final int DUMP_ACTIVE_CHAINS = 4;

    /** dump extent headers */
    public static final int DUMP_EXTENT_HEADERS = 8;

    /** do full consistency check */
    public static final int DUMP_CONSISTENTCY_INFO = 16;
    /** dump the file as text (for debugging).
     * "level" is a bitmask:
     * <ol>
     * <li> DUMP_HEADER -- basic header info
     * <li> DUMP_DELETED_CHAINS -- deleted extent chains
     * <li> DUMP_ACTIVE_CHAINS -- active extent chains
     * <li> DUMP_EXTENT_HEADERS -- extent headers
     * <li> DUMP_CONSISTENTCY_INFO -- full consistency check
     * </ol>
     * hdrLevel determines what to dump for each extent header.
     * @see BtreeExtent#dump
     * @param level bitmask of what to dump
     * @param hdrLevel bitmask of what to dump for each extent header
     * @param strm where to dump it to
     * @return number of errors encountered
     */
    public int dump(int level, int hdrLevel, PrintWriter strm)
            throws StorageException {
        return dump(level, hdrLevel, true, strm);
    }

    int dump(int level, int hdrLevel, boolean headAndTrail, PrintWriter strm)
            throws StorageException {
        boolean doHeader = (level & DUMP_HEADER) != 0;
        boolean doDelChains = (level & DUMP_DELETED_CHAINS) != 0;
        boolean doActChains = (level & DUMP_ACTIVE_CHAINS) != 0;
        boolean doExtHeaders = (level & DUMP_EXTENT_HEADERS) != 0;
        boolean doChecks = (level & DUMP_CONSISTENTCY_INFO) != 0;

        if (strm == null) {
            strm = new PrintWriter(System.out);
        }
        if (headAndTrail) {
            strm.println("Btree data file dump:\n");
        }

        if (doHeader) {
            strm.println("Magic number: " + magic);
            strm.println("Btree version: " + version);
            strm.println("EOF chunk: " + eof);
            strm.println("" + numberOfObjects + " objects");
            strm.println();
        }

        if (doDelChains) {
            boolean sawDeleted = false;
            for (int i = 0; i < MAX_CHUNKS_IN_EXTENT; i++) {
                if (deletedExtents[i] > 0) {
                    if (!sawDeleted) {
                        strm.println("Deleted extents:");
                        sawDeleted = true;
                    }
                    strm.println("\t" + (i + 1) + " chunks:");
                    int next = deletedExtents[i];
                    while (next > 0) {
                        strm.println("\t\t" + next);
                        BtreeExtent del = getExtent(next);
                        next = del.getNext();
                    }
                }
            }
            if (!sawDeleted) {
                strm.println("No deleted extents");
            }
            strm.println();
        }

        if (doActChains) {
            strm.println("Active chains:");
            Iterator iter = iterator(ITERATE_NORMAL_EXTENTS);
            while (iter.hasNext()) {
                BtreeExtent ext = (NormalBtreeExtent)iter.next();
                strm.println("\t" + ext.getOffset());
                int next = ext.getNext();
                while (next > 0) {
                    strm.println("\t\t" + next);
                    next = getExtent(next).getNext();
                }
            }
            strm.println();
        }

        if (doExtHeaders) {
            strm.println("All extent headers:");
            Iterator iter = iterator(ITERATE_ALL_EXTENTS);
            while (iter.hasNext()) {
                BtreeExtent ext = (BtreeExtent)iter.next();
                ext.dump(hdrLevel, strm);
                strm.println();
            }
            strm.println();
        }

        int numErrs = 0;
        if (doChecks) {
            // Ensure all extents are on exactly one chain
            // Ensure all records on deleted chains are deleted and
            //   of the correct size
            // Ensure records on active chains have the correct type
            // Ensure only maximum-sized active extents are in chains
            // Ensure the repository knows how many objects it has
            final byte WAS_SEEN = 4;
            strm.println("Consistency check:");
            byte extents[] = new byte[eof];
            Iterator iter = iterator(ITERATE_ALL_EXTENTS);
            while (iter.hasNext()) {
                BtreeExtent ext = (BtreeExtent)iter.next();
                extents[ext.getOffset()] = ext.getType();
                if (ext instanceof ActiveBtreeExtent) {
                    ActiveBtreeExtent aext = (ActiveBtreeExtent)ext;
                    if (aext.getNext() > 0 &&
                        aext.getMyDataLength() < aext.getAvailableDataLength()) {
                        strm.println(
                            aext.getTypeName() + " extent " + ext.getOffset() +
                            " of size " + aext.getMyDataLength() +
                            " has a successor extent");
                    }
                }
            }

            int totalNormal = 0;
            for (int i = 0; i < eof; i++) {
                if ((extents[i] & 0x3) == BtreeExtent.IS_NORMAL) {
                    totalNormal++;
                    extents[i] |= WAS_SEEN;
                    BtreeExtent ext = getExtent(i);
                    int next = ext.getNext();
                    while (next != 0) {
                        BtreeExtent cext = getExtent(next);
                        int type = cext.getType();
                        if (type != BtreeExtent.IS_CONTINUATION) {
                            strm.println(
                                BtreeExtent.getTypeName(type) + " extent " +
                                next + " is on active chain " + i);
                            numErrs++;
                        } else if ((extents[next] & WAS_SEEN) != 0) {
                            strm.println("extent " + next +
                                " is on more than one chain");
                            numErrs++;
                        }
                        extents[next] |= WAS_SEEN;
                        next = cext.getNext();
                    }
                }
            }
            if (totalNormal != numberOfObjects) {
                strm.println("Repository has " + totalNormal +
                    " normal extents, but thinks it has " +
                    numberOfObjects + " objects");
                numErrs++;
            }

            for (int i = 0; i < MAX_CHUNKS_IN_EXTENT; i++) {
                int next = deletedExtents[i];
                while (next != 0) {
                    BtreeExtent ext = getExtent(next);
                    int type = ext.getType();
                    if (type != BtreeExtent.IS_DELETED) {
                        strm.println(
                            BtreeExtent.getTypeName(type) + " extent " +
                            next + " is on deleted chain " + (i + 1));
                        numErrs++;
                    } else {
                        extents[next] |= WAS_SEEN;
                        int size = ext.getSize();
                        if (size != i + 1) {
                            strm.println("Deleted extent " + next +
                                " of size " + size +
                                " is on chain for size " + (i + 1));
                            numErrs++;
                        }
                    }
                    next = ext.getNext();
                }
            }

            for (int i = 0; i < eof; i++) {
                boolean wasSeen = (extents[i] & WAS_SEEN) != 0;
                int type = (extents[i] & 0x3);
                if (type > 0 && !wasSeen) {
                    strm.println("Extent " + i + " is not on any chain");
                    numErrs++;
                }
            }

            if (headAndTrail) {
                strm.println("" + numErrs + " error(s) detected.");
            }
            strm.println();
        }

        if (headAndTrail) {
            strm.println("End of btree data file dump\n");
        }
        strm.flush();
        return numErrs;
    }
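    /* Editor's note (not in the original source): a hypothetical call that
     * combines the dump flags as a bitmask, assuming "dataFile" is an open
     * BtreeDataFile instance:
     *
     *     PrintWriter out = new PrintWriter(System.out);
     *     int errs = dataFile.dump(
     *         BtreeDataFile.DUMP_HEADER | BtreeDataFile.DUMP_CONSISTENTCY_INFO,
     *         0, out);
     */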

    /** Get a normal extent of the desired size, and write the supplied key
     * to it.  There are three ways to get the extent, listed in decreasing
     * order of desirability:
     * <ol>
     * <li> Use the candidate.  This will be done if it is large enough.
     * The candidate already contains the supplied key.
     * <li> Use a deleted extent of the desired size.  This will be done if
     * the candidate isn't suitable, but a suitable deleted extent exists.
     * <li> Add a new extent at the end of the file.
     * </ol>
     */
    private NormalBtreeExtent getNormalExtent(
            int length, byte key[], NormalBtreeExtent candidate)
            throws StorageException {
        if (candidate != null) {
            if (candidate.isMaximum() ||
                candidate.getAvailableDataLength() >= length) {
                candidate.setMyDataLength(length);
                return candidate;
            }
        }

        int numChunks = NormalBtreeExtent.getNumChunks(key.length, length);
        DeletedBtreeExtent del = getDeletedExtent(numChunks);
        NormalBtreeExtent first;
        if (del != null) {
            first = new NormalBtreeExtent(del, key, length);
        } else {
            first = addNormalExtent(length, key);
        }
        return first;
    }

    /* The same algorithm is used here as for getNormalExtent */
    private ContinuationBtreeExtent getContinuationExtent(
            int length, ContinuationBtreeExtent candidate)
            throws StorageException {
        if (candidate != null) {
            if (candidate.isMaximum() ||
                candidate.getAvailableDataLength() >= length) {
                candidate.setMyDataLength(length);
                return candidate;
            }
        }

        int nChunks = ContinuationBtreeExtent.getNumChunks(length);
        DeletedBtreeExtent dext = getDeletedExtent(nChunks);
        ContinuationBtreeExtent cext;
        if (dext != null) {
            cext = new ContinuationBtreeExtent(dext, length);
        } else {
            cext = addContinuationExtent(length);
        }
        return cext;
    }

    /* Add a normal extent at the end of the file */
    private NormalBtreeExtent addNormalExtent(int length, byte key[])
            throws StorageException {
        int numChunks = NormalBtreeExtent.getNumChunks(length, key.length);
        NormalBtreeExtent ext = new NormalBtreeExtent(
            this, eof, (short)computeMaxExtentSize(numChunks), length, key);
        eof += ext.getSize();
        dirty = true;
        return ext;
    }

    /* Add a continuation extent at the end of the file */
    private ContinuationBtreeExtent addContinuationExtent(int length) {
        int numChunks = ContinuationBtreeExtent.getNumChunks(length);
        ContinuationBtreeExtent ext = new ContinuationBtreeExtent(
            this, eof, (short)computeMaxExtentSize(numChunks), length);
        eof += ext.getSize();
        dirty = true;
        return ext;
    }

    /* write the data file header to disk.  The FileHeader is written
     * only when creating the data file. */
    private void writeHeader(byte buffer[], boolean writeFileHeader) {
        if (writeFileHeader) {
            fileHeader.write(buffer);
        }
        IntHolder offset = new IntHolder(FileHeader.HEADER_SIZE);
        Converter.writeInt(buffer, offset, magic);
        Converter.writeInt(buffer, offset, eof);
        Converter.writeInt(buffer, offset, version);
        Converter.writeString(buffer, offset, UUID);
        for (int i = 0; i < MAX_CHUNKS_IN_EXTENT; i++) {
            Converter.writeInt(buffer, offset, deletedExtents[i]);
        }
        Converter.writeInt(buffer, offset, numberOfObjects);
        Converter.writeLong(buffer, offset, counter);
    }
    /* check that a normal extent with the supplied key begins at the
     * given chunk number.  If it does, return it; if not, throw
     * an exception. */
    private NormalBtreeExtent checkRecord(int chunkNum, byte key[])
            throws StorageException {
        BtreeExtent ext = getExtent(chunkNum);
        if (!(ext instanceof NormalBtreeExtent)) {
            throw new StoragePersistentDataException(
                MessageFormat.format(
                    "Not a normal extent at offset {0}",
                    new Object[] { new Integer(chunkNum) }));
        }
        NormalBtreeExtent norm = (NormalBtreeExtent)ext;
        if (!Arrays.equals(key, norm.key)) {
            throw new StorageBadRequestException(
                MessageFormat.format(
                    "Incorrect key at offset {0}",
                    new Object[] { new Integer(chunkNum) }));
        }
        return norm;
    }

    /** ensure everything that needs to fit evenly does
     * @param pageSz the page size with which this file will be accessed
     * @exception StorageBadRequestException if any constraints are violated
     */
    private static void checkFit(int pageSz) throws StorageException {
        String msg = null;
        if (BTREE_HEADER_SIZE % BTREE_CHUNK_SIZE != 0)
            msg = "Btree header size is not an even number of chunks";
        else if (pageSz % BTREE_CHUNK_SIZE != 0)
            msg = "File cache page size is not an even number of chunks";
        if (msg != null)
            throw new StorageBadRequestException(msg);
    }

    /** iterate over all extents */
    public static final int ITERATE_ALL_EXTENTS = 0;

    /** iterate over normal extents */
    public static final int ITERATE_NORMAL_EXTENTS = 1;

    /** iterate over all keys */
    public static final int ITERATE_KEYS = 2;

    /** Create an iterator which iterates over the records in this file
     * @param type what to iterate over.  See the constants in this class.
     */
    Iterator iterator(int type) {
        return this.new BtreeDataFileIterator(type);
    }

    /**
     * Iterates over the extents in the data file: optionally either all
     * extents or only the normal extents.  This is normally created with
     * the BtreeDataFile.iterator method.
     */
    class BtreeDataFileIterator implements Iterator {

        /* The modification level of the data file when we were created */
        private int iterModLevel;

        /* current extent */
        private BtreeExtent iterCurExtent;

        /* what we iterate over */
        private int iterType;

        /** Create the iterator
         * @param type what to iterate over.  See the constants in the
         * enclosing class.
         */
        BtreeDataFileIterator(int type) {
            iterType = type;
            iterCurExtent = null;
            findNextExtent(true);
        }

        /** Remove is not supported
         * @exception UnsupportedOperationException always thrown
         */
        public void remove() {
            /* an optional operation which we do not support */
            throw new UnsupportedOperationException("Remove is not supported");
        }

        /** Is there another extent to return?
         * @return true if there is another extent
         * @exception ConcurrentModificationException if the data file has
         * been modified since the iterator was created
         */
        public boolean hasNext() {
            return iterCurExtent != null;
        }

        /** Return the next extent, if there is one
         * @return the next extent
         * @exception ConcurrentModificationException if the data file has
         * been modified since the iterator was created
         * @exception NoSuchElementException if no extents remain
         */
        public Object next() {
            synchronized (BtreeDataFile.this) {
                checkModLevel();
                BtreeExtent ext = iterCurExtent;
                if (ext == null) {
                    throw new NoSuchElementException("At EOF");
                }
                findNextExtent(false);
                Object retval;
                switch (iterType) {
                case ITERATE_KEYS:
                    try {
                        retval = BtreeDataFile.this.storage.readMOFIDData(
                            new ByteArrayInputStream(
                                ((NormalBtreeExtent)ext).key));
                    } catch (StorageException ex) {
                        throw new RuntimeStorageException(ex);
                    }
                    break;
                default:
                    retval = ext;
                }
                return retval;
            }
        }

        /* find and read the next extent */
        private void findNextExtent(boolean atStart) {
            synchronized (BtreeDataFile.this) {
                int offset;
                if (atStart) {
                    iterModLevel = modificationLevel;
                    offset = BTREE_HEADER_SIZE / BTREE_CHUNK_SIZE;
                } else if (iterCurExtent != null) {
                    offset = iterCurExtent.getOffset() + iterCurExtent.getSize();
                } else {
                    return;
                }
                checkModLevel();

                boolean allExtents = (iterType == ITERATE_ALL_EXTENTS);
                while (offset < eof) {
                    BtreeExtent ext;
                    try {
                        ext = getExtent(offset);
                    } catch (StorageException ex) {
                        throw new RuntimeStorageException(ex);
                    }
                    if (allExtents || (ext instanceof NormalBtreeExtent)) {
                        iterCurExtent = ext;
                        return;
                    }
                    offset += ext.getSize();
                }
                iterCurExtent = null;
            }
        }

        /* check to see if the data file has been changed out from under us */
        private void checkModLevel() {
            if (iterModLevel != modificationLevel) {
                throw new ConcurrentModificationException(
                    "Data file has been modified");
            }
        }
    }
}
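For reference, here is a small standalone sketch (not part of the NetBeans source; the class and method names are hypothetical) that reproduces the header-layout arithmetic described in the class comment at the top of the file:

public class BtreeHeaderLayout {

    static final int MAX_CHUNKS_IN_EXTENT = 256;   // matches BtreeDataFile
    static final int CHAIN_BASE = 114;             // offset of the size-1 chain head

    // byte offset of the header slot holding the first deleted extent of n chunks
    static int chainHeadOffset(int n) {
        return CHAIN_BASE + 4 * (n - 1);
    }

    public static void main(String[] args) {
        System.out.println(chainHeadOffset(1));                     // 114
        System.out.println(chainHeadOffset(MAX_CHUNKS_IN_EXTENT)); // 1134
        // the object count follows at byte 1138 and the persistent counter
        // at 1142, all comfortably inside the 2048-byte BTREE_HEADER_SIZE header
    }
}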
