alvinalexander.com | career | drupal | java | mac | mysql | perl | scala | uml | unix  

What this is

This file is included in the DevDaily.com "Java Source Code Warehouse" project. The intent of this project is to help you "Learn Java by Example" TM.

Other links

The source code

/*
 *                 Sun Public License Notice
 * 
 * The contents of this file are subject to the Sun Public License
 * Version 1.0 (the "License"). You may not use this file except in
 * compliance with the License. A copy of the License is available at
 * http://www.sun.com/
 * 
 * The Original Code is NetBeans. The Initial Developer of the Original
 * Code is Sun Microsystems, Inc. Portions Copyright 1997-2001 Sun
 * Microsystems, Inc. All Rights Reserved.
 */
package org.netbeans.mdr.persistence.btreeimpl.btreestorage;

import java.lang.ref.*;
import java.io.*;
import java.text.*;
import java.util.*;

import org.netbeans.mdr.persistence.*;

/**
 * An in-memory cache of MDR objects.  At any given moment, there are four
 * kinds of objects in the cache:
 * <ol>
 * <li>Objects which are referenced by client code, and so cannot be
 * collected.</li>
 * <li>Objects which have been created but not yet written to disk, and so
 * should not be collected until they are written out.</li>
 * <li>Objects which have been marked dirty, and so should not be collected
 * until they are written out.</li>
 * <li>Objects which are neither referenced nor dirty, and so are eligible
 * for collection.</li>
 * </ol>
 * The cache also keeps track of the keys of objects which have been deleted.
 * <p>
 * The creator of the cache specifies how many objects to cache in memory.
 * Hard references are kept to the last N objects used, to enforce this.
 * The cache keeps soft references to the other objects of the third type,
 * so how often they are collected depends upon memory usage and the JVM's
 * GC implementation.
 * <p>
 * NOTE(review): despite the wording above, {@code FacilityCache} below is
 * implemented with {@link java.lang.ref.WeakReference}, not soft
 * references — confirm which semantics are intended.
 */
public class MDRCache {

    /* All caches ever created; consulted by checkThreshhold() to rebalance
     * the global change count.
     * NOTE(review): entries are added in the constructor but never removed,
     * so caches are never garbage-collected — looks like a leak for
     * short-lived caches; confirm lifecycle with callers. */
    private static final ArrayList instances = new ArrayList();

    /* hash MOF ID's to references */
    private final FacilityCache hashOnId;

    /* Hard references (a bounded, recency-ish Map; see CacheClass) */
    private final Map hardRef;

    /* current slot in above index
     * NOTE(review): appears unused in this file; candidate for removal. */
    private int hardIndex;

    /* queue for soft references
     * NOTE(review): appears unused — FacilityCache owns its own queue. */
    private ReferenceQueue queue;

    /* List of new objects: MOF ID -> object */
    private HashMap newOnes;

    /* List of dirty objects: MOF ID -> object */
    private HashMap dirty;

    /* List of deleted IDs: MOF ID -> MOF ID */
    private HashMap deleted;

    /* our overflow handler (may be null; see two-arg constructor) */
    OverflowHandler handler;

    /* our threshhold for changed objects, shared by ALL cache instances;
     * overridable via the system property named below */
    private static final int threshhold = Integer.getInteger(
        "org.netbeans.mdr.persistence.btreeimpl.btreestorage.MDRCache.threshhold",
        BtreeDatabase.MDR_CACHE_THRESHHOLD).intValue() * 2;

    /* NOTE(review): appears unused in this file. */
    private static final int LOCAL_MINIMUM = 200;

    /* global count of changed (new + dirty) objects across all instances.
     * NOTE(review): this static field is updated under synchronized
     * (MDRCache.this) — an instance lock — so two caches can race on it;
     * confirm whether MDRCache.class was intended. */
    private static int size = 0;

    /* per-instance limit of changed objects before the handler is called */
    private int localThreshhold;

    /* this instance's changed-object count as last folded into 'size' */
    private int lastLocalSize = 0;

    /* caching statistics (reported by showStats) */
    int hits;
    int misses;
    int maxSize;

    /** Create the cache
     * @param size how many objects to cache in memory
     * @param hndlr handler to call when the cache has too many changed objects
     * @param limit number of changed objects to allow
     * @param hardRef map used to pin recently-used objects, or null for the
     *        default bounded ring buffer
     */
    public MDRCache(int size, OverflowHandler hndlr, int limit, Map hardRef) {
        this(size, hardRef);
        handler = hndlr;
        localThreshhold = limit;
    }

    /**
     * A fixed-capacity (256-slot) ring buffer masquerading as a Map: put()
     * pins a value in the next slot (the key is ignored) and old values are
     * overwritten in FIFO order.  Only put(), clear(), size() and isEmpty()
     * are supported; everything else throws UnsupportedOperationException.
     */
    private static class CacheClass implements Map {
        private final Object inner[] = new Object[256];
        private int size, cursor;

        public Set keySet() {
            throw new UnsupportedOperationException();
        }

        public Set entrySet() {
            throw new UnsupportedOperationException();
        }

        public void putAll(Map t) {
            throw new UnsupportedOperationException();
        }

        public boolean isEmpty() {
            return size == 0;
        }

        public boolean containsKey(Object key) {
            throw new UnsupportedOperationException();
        }

        public boolean containsValue(Object value) {
            throw new UnsupportedOperationException();
        }

        public Collection values() {
            throw new UnsupportedOperationException();
        }

        /** Pin {@code value} in the next ring slot; the key is ignored and
         * null is always returned (so this is NOT a contract-conforming
         * Map.put). */
        public Object put(Object key, Object value) {
            inner[cursor] = value;
            cursor++;
            if (size < inner.length) {
                size++;
            }
            // NOTE(review): wrap is tested against 'size', not inner.length.
            // On the very first put this resets cursor to 0, so slot 0 is
            // overwritten by the second put — looks like an off-by-one
            // (steady state once full is a normal ring); confirm intent.
            if (cursor >= size) {
                cursor = 0;
            }
            return null;
        }

        public void clear() {
            Arrays.fill(inner, null);
            size = 0;
            cursor = 0;
        }

        public int size() {
            return size;
        }

        public Object get(Object key) {
            throw new UnsupportedOperationException();
        }

        public Object remove(Object key) {
            throw new UnsupportedOperationException();
        }
    }

    /** Create the cache
     * @param size how many objects to cache in memory
     * @param hardRef map used to pin recently-used objects, or null for the
     *        default CacheClass ring buffer
     */
    public MDRCache(final int size, Map hardRef) {
        hashOnId = new FacilityCache();
        if (hardRef == null) {
            // Previously an access-ordered LinkedHashMap was used here;
            // kept for reference:
            hardRef = new CacheClass()/*LinkedHashMap(2 * size, 0.5f, true) {
                public boolean removeEldestEntry(Map.Entry entry) {
                    return size() < size;
                }
            }*/;
        }
        this.hardRef = hardRef;
        deleted = new HashMap();
        dirty = new HashMap();
        newOnes = new HashMap();
        // register with the global instance list (guarded by the class lock)
        synchronized (MDRCache.class) {
            instances.add(this);
        }
    }

    /** returns true if the cache contains any changed objects
     * @return true if any objects have been modified or deleted
     */
    public synchronized boolean hasChanges() {
        return dirty.size() + newOnes.size() + deleted.size() > 0;
    }

    /** add a new object to the cache
     * @param m the object's MOF ID
     * @param o the object to add
     */
    public synchronized void put(Object m, Object o) throws StorageException {
        // don't replace an existing (weakly-referenced) entry for this key
        if (hashOnId.get(m) == null) {
            hashOnId.put(m, o);
        }
        makeHardRef(m, o);
        // track the high-water mark for showStats()
        int curSize = hashOnId.size();
        if (curSize > maxSize) {
            maxSize = curSize;
        }
    }

    /** get an object from the cache
     * @param m the object's MOF ID
     * @return the cached object, or null on a miss (also updates hit/miss
     *         statistics and re-pins the object on a hit)
     */
    public synchronized Object get(Object m) {
        Object o = hashOnId.get(m);
        if (o != null) {
            makeHardRef(m, o);
            hits++;
        } else {
            misses++;
        }
        return o;
    }

    /** replace an object in the cache
     * @param m the object's key
     * @param o the object
     */
    public synchronized void replace(Object m, Object o) throws StorageException {
        removeFromCache(m);
        put(m, o);
    }

    /** remove an object from the cache; if the object was not new (i.e. it
     * may already exist on disk), its ID is recorded in the deleted list
     * @param m the object's MOF ID
     */
    public synchronized void remove(Object m) {
        if (!removeFromCache(m)) {
            deleted.put(m, m);
        }
    }

    /** remove all traces from the cache.
     * @return true if the object was new
     */
    private boolean removeFromCache(Object m) {
        hashOnId.remove(m);
        boolean wasNew = (newOnes.remove(m) != null);
        dirty.remove(m);
        return wasNew;
    }

    /** clear all unnecessary objects from the cache (drops the hard
     * references and suggests a GC so weakly-held entries can go) */
    public synchronized void clear() {
        hardRef.clear();
        System.gc();
    }

    /* create a hard reference to the supplied object */
    private void makeHardRef(Object m, Object o) {
        hardRef.put(m, o);
    }

    /* Fold this instance's changed-object count into the global 'size'.
     * NOTE(review): appears unused in this file (checkThreshhold inlines the
     * same logic); also see the locking note on 'size'. */
    void updateSize() {
        int allChanged = newOnes.size() + dirty.size();
        int sizeDelta = allChanged - lastLocalSize;
        lastLocalSize = allChanged;
        synchronized (MDRCache.this) {
            size += sizeDelta;
        }
    }

    /* Check to see if we have exceeded our threshhold for dirty objects.
     * If the GLOBAL threshhold is hit, every registered cache with more than
     * 10 changed objects gets its handler invoked and the global count is
     * recomputed; otherwise only this instance's local threshhold is checked.
     * NOTE(review): 'instances' is iterated here under the instance lock but
     * mutated in the constructor under the class lock — confirm. */
    private void checkThreshhold() throws StorageException {
        int allChanged = newOnes.size() + dirty.size();
        int sizeDelta = allChanged - lastLocalSize;
        lastLocalSize = allChanged;
        synchronized (MDRCache.this) {
            size += sizeDelta;
            if (size >= threshhold) {
                //System.err.println("Global threshhold reached at: " + size);
                int newSize = 0;
                for (Iterator it = instances.iterator(); it.hasNext();) {
                    MDRCache cache = (MDRCache) it.next();
                    allChanged = cache.newOnes.size() + cache.dirty.size();
                    if (allChanged > 10) {
                        //System.err.println("    ...Threshhold reached at level " + allChanged);
                        cache.handler.cacheThreshholdReached(cache, allChanged);
                        // re-read: the handler presumably flushed some changes
                        allChanged = cache.newOnes.size() + cache.dirty.size();
                    }
                    cache.lastLocalSize = allChanged;
                    newSize += allChanged;
                }
                size = newSize;
                return;
            }
        }
        if (allChanged >= localThreshhold) {
            //System.err.println("Threshhold reached at level " + allChanged + ", global cache size: " + size);
            handler.cacheThreshholdReached(this, allChanged);
        }
    }

    /* throw an exception when a bad key is processed */
    private void badKey(Object key) throws StorageException {
        throw new StorageBadRequestException(
            MessageFormat.format(
                "No object with ID {0}", new Object[] {key}));
    }

    /**
     * mark that the object with the given MOF ID is new
     * @param key MOF ID
     * @throws StorageException if the key is not present in the cache
     */
    public synchronized void setNew(Object key) throws StorageException {
        Object o = get(key);
        if (o == null) {
            badKey(key);
        }
        newOnes.put(key, o);
        if (handler != null)
            checkThreshhold();
    }

    /** check if the object with the given key is new
     * @return true if it is new
     */
    public synchronized boolean isNew(Object key) {
        return newOnes.get(key) != null;
    }

    /** Get an iterator over the keys of all active objects in the cache
     * @return the iterator
     */
    public synchronized Iterator iterateActive() {
        return hashOnId.keySet().iterator();
    }

    /** Get an iterator over the keys of deleted objects
     * @return the iterator
     */
    public synchronized Iterator iterateDeleted() {
        return deleted.keySet().iterator();
    }

    /** Get an iterator over the keys of new objects
     * @return the iterator
     */
    public synchronized Iterator iterateNew() {
        return newOnes.keySet().iterator();
    }

    /** return the number of new objects
     * @return number of new objects
     */
    public int numberNew() {
        return newOnes.size();
    }

    /** return the number of deleted objects
     * @return number of deleted objects
     */
    public int numberDeleted() {
        return deleted.size();
    }

    /** check if the object with the given key is deleted
     * @return true if it is deleted
     */
    public synchronized boolean isDeleted(Object key) {
        return deleted.get(key) != null;
    }

    /**
     * mark that the object with the given MOF ID is dirty; new objects are
     * not also marked dirty (they will be written out anyway)
     * @param key MOF ID
     * @throws StorageException if the key is not present in the cache
     */
    public synchronized void setDirty(Object key) throws StorageException {
        Object o = get(key);
        if (o == null) {
            badKey(key);
        }
        if (newOnes.get(key) == null) {
            dirty.put(key, o);
            if (handler != null)
                checkThreshhold();
        }
    }

    /** Get all of the new objects
     * @return the keys of the new objects
     */
    public synchronized Collection getNew() {
        return newOnes.keySet();
    }

    /** Get all of the dirty objects
     * @return the keys of the dirty objects
     */
    public synchronized Collection getDirty() {
        return dirty.keySet();
    }

    /** Get all of the deleted IDs
     * @return the deleted IDs
     */
    public synchronized Collection getDeleted() {
        return deleted.values();
    }

    /** Clear the set of new, dirty, and deleted objects,
     * presumably after having written them all out.
     */
    public synchronized void clearLists() {
        dirty.clear();
        newOnes.clear();
        deleted.clear();
    }

    /**
     * Show caching statistics
     */
    public void showStats(PrintStream strm) {
        showStats(new PrintWriter(strm));
    }

    /**
     * Show caching statistics.
     * NOTE(review): hit rate is NaN when hits + misses == 0 — confirm
     * whether that output is acceptable.
     */
    public void showStats(PrintWriter strm) {
        strm.println(
            "Cache hits: " + hits + " misses: " + misses +
            " hit rate: " + 100. * (float)hits / (float)(hits + misses));
        strm.println("Maximum size: " + maxSize);
        strm.flush();
    }

    /**
     * The cache handler is called when the number of changes in the cache
     * reaches its threshhold.  The handler is expected to do something
     * to reduce the number of changed objects in the cache, for instance,
     * write them to disk
     */
    public interface OverflowHandler {
        /** Notify handler that the cache has reached its threshhold
         * @param cache cache which reached threshhold
         * @param size number of changed objects currently in cache
         */
        void cacheThreshholdReached(MDRCache cache, int size)
            throws StorageException;
    }

    /**
     * A HashMap whose values are weak references to the cached objects;
     * cleared references are purged lazily (on every put/get/remove) by
     * draining the reference queue.  Note that put() always returns null
     * and get()/remove() unwrap the reference, so this deliberately bends
     * the Map contract.
     */
    private static class FacilityCache extends HashMap {
        private final ReferenceQueue queue = new ReferenceQueue();
        /* reentrancy guard for cleanUp(), checked via assert */
        private boolean cleaningUp = false;

        /* A weak reference that remembers its map key so the entry can be
         * removed once the referent is collected. */
        private class CacheReference extends WeakReference {
            private Object key;

            public CacheReference(Object key, Object object) {
                super(object, queue);
                this.key = key;
            }

            public Object getKey() {
                return key;
            }
        }

        /* Drain the reference queue, removing entries whose referents were
         * collected.  If the map now holds a DIFFERENT, still-live reference
         * under the same key (the entry was replaced since the old reference
         * was enqueued), put it back. */
        private void cleanUp() {
            assert !cleaningUp;
            CacheReference reference;
            cleaningUp = true;
            try {
                while ((reference = (CacheReference) queue.poll()) != null) {
                    Object key = reference.getKey();
                    java.lang.ref.Reference currentRef =
                        (java.lang.ref.Reference) super.remove(key);
                    if (currentRef != null && currentRef != reference &&
                        currentRef.get() != null) {
                        super.put(key, currentRef);
                    }
                }
            } finally {
                cleaningUp = false;
            }
        }

        public Object put(Object key, Object value) {
            cleanUp();
            Object result = super.put(key, new CacheReference(key, value));
            // a live entry must never be silently replaced (see MDRCache.put)
            assert result == null || ((CacheReference) result).get() == null :
                "replacing non-null reference";
            return null;
        }

        public Object remove(Object key) {
            cleanUp();
            Object result = super.remove(key);
            return result == null ? null : ((CacheReference) result).get();
        }

        public Object get(Object key) {
            cleanUp();
            Object result = super.get(key);
            return result == null ? null : ((CacheReference) result).get();
        }
    }
}

... this post is sponsored by my books ...

#1 New Release!

FP Best Seller

 

new blog posts

 

Copyright 1998-2021 Alvin Alexander, alvinalexander.com
All Rights Reserved.

A percentage of advertising revenue from
pages under the /java/jwarehouse URI on this website is
paid back to open source projects.