Commit 7f171406 authored by Matt Tucker, committed by matt

No longer needed.


git-svn-id: http://svn.igniterealtime.org/svn/repos/messenger/trunk@43 b35dd754-fafc-0310-a699-88a17e54d16e
parent c7660169
/*
* $RCSfile$
* $Revision$
* $Date$
*
* Copyright (C) 1999-2003 CoolServlets, Inc. All rights reserved.
*
* This software is the proprietary information of CoolServlets, Inc.
* Use is subject to license terms.
*/
package org.jivesoftware.util;
import org.jivesoftware.messenger.auth.UnauthorizedException;
/**
* Interface for querying and setting cache configuration information.
* Since there will be several active caches within a single server, caches can be
* given names (hopefully with some relationship to the client using the
* cache) for display to administrators.<p>
*
* This interface is separated from the cache so that objects can update
* cache configuration independently of the cache itself, allowing changes
* to be applied at times best suited to the cache's own performance. Alternatively,
* objects can update the cache directly, since the cache extends the CacheInfo interface.
*
* @author Iain Shigeoka
*/
public interface CacheInfo {
/**
* <p>Obtains the name of this cache.</p>
* <p>The name is completely arbitrary and used only for
* display to administrators. However, it should have some
* relationship to the primary user of the cache.</p>
*
* @return the name of this cache.
*/
public String getName();
/**
* <p>Obtains the maximum size of the cache.</p>
* <p>If the cache grows larger
* than the max size, the least frequently used items will be removed. If
* the max cache size is set to -1, there is no size limit.</p>
*
* @return the maximum size of the cache (-1 indicates unlimited max size)
*/
public int getMaxCacheSize();
/**
* Sets the maximum size of the cache. If the cache grows larger
* than the max size, the least frequently used items will be removed. If
* the max cache size is set to -1, there is no size limit.
*
* @param size The maximum size of this cache (-1 indicates unlimited max size)
* @throws UnauthorizedException if the caller has insufficient permissions to adjust this setting
*/
public void setMaxCacheSize(int size) throws UnauthorizedException;
/**
* Returns the maximum number of milliseconds that any object can live
* in cache. Once the specified number of milliseconds passes, the object
* will be automatically expired from cache. If the max lifetime is set
* to -1, then objects never expire.
*
* @return the maximum number of milliseconds before objects are expired.
*/
public long getMaxLifetime();
/**
* Sets the maximum number of milliseconds that any object can live
* in cache. Once the specified number of milliseconds passes, the object
* will be automatically expired from cache. If the max lifetime is set
* to -1, then objects never expire.
*
* @param maxLifetime the maximum number of milliseconds before objects are expired.
* @throws UnauthorizedException if the caller has insufficient permissions to adjust this setting
*/
public void setMaxLifetime(long maxLifetime) throws UnauthorizedException;
/**
* Returns the size of the cache contents in bytes. This value is only a
* rough approximation, so cache users should expect that actual VM
* memory used by the cache could be significantly higher than the value
* reported by this method.
*
* @return the size of the cache contents in bytes.
*/
public int getCacheSize();
/**
* Returns the number of cache hits. A cache hit occurs every
* time the get method is called and the cache contains the requested
* object.<p>
*
* Keeping track of cache hits and misses lets one measure how efficient
* the cache is; the higher the percentage of hits, the more efficient.
*
* @return the number of cache hits.
*/
public long getCacheHits();
/**
* Returns the number of cache misses. A cache miss occurs every
* time the get method is called and the cache does not contain the
* requested object.<p>
*
* Keeping track of cache hits and misses lets one measure how efficient
* the cache is; the higher the percentage of hits, the more efficient.
*
* @return the number of cache misses.
*/
public long getCacheMisses();
}
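As an illustration of how these statistics fit together, the hypothetical helper below (not part of this commit; the class and method names are invented for the example) computes a hit ratio from any CacheInfo implementation.

package org.jivesoftware.util;

/**
 * Illustrative sketch only; not part of this commit.
 * Builds a one-line efficiency summary from any CacheInfo implementation.
 */
public class CacheInfoReporter {

    public static String summarize(CacheInfo info) {
        long hits = info.getCacheHits();
        long misses = info.getCacheMisses();
        long total = hits + misses;
        // Guard against division by zero when the cache has never been queried.
        double hitRatio = (total == 0) ? 0.0 : ((double)hits) / total;
        return info.getName() + ": " + info.getCacheSize() + " bytes, "
                + (int)(hitRatio * 100) + "% hit ratio";
    }
}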
/*
* $RCSfile$
* $Revision$
* $Date$
*
* Copyright (C) 1999-2001 CoolServlets, Inc. All rights reserved.
*
* This software is the proprietary information of CoolServlets, Inc.
* Use is subject to license terms.
*/
package org.jivesoftware.util;
import org.jivesoftware.util.LinkedListNode;
/**
* Wrapper for all objects put into cache. Its primary purpose is to maintain
* references to the linked lists that maintain the creation time of the object
* and the ordering of the most used objects.
* <p/>
* This class is optimized for speed rather than strictly correct encapsulation.
*
* @author Jive Software
*/
public final class CacheObject {
/**
* Underlying object wrapped by the CacheObject.
*/
public Object object;
/**
* The size of the Cacheable object. The size of the Cacheable
* object is only computed once when it is added to the cache. This makes
* the assumption that once objects are added to cache, they are mostly
* read-only and that their size does not change significantly over time.
*/
public int size;
/**
* A reference to the node in the cache order list. We keep the reference
* here to avoid linear scans of the list. Every time the object is
* accessed, the node is removed from its current spot in the list and
* moved to the front.
*/
public LinkedListNode lastAccessedListNode;
/**
* A reference to the node in the age order list. We keep the reference
* here to avoid linear scans of the list. The reference is used if the
* object has to be deleted from the list.
*/
public LinkedListNode ageListNode;
/**
* A count of the number of times the object has been read from cache.
*/
public int readCount = 0;
/**
* Creates a new cache object wrapper. The size of the Cacheable object
* must be passed in to prevent another possibly expensive
* lookup by querying the object itself for its size.<p>
*
* @param object the underlying Object to wrap.
* @param size the size of the Cacheable object in bytes.
*/
public CacheObject(Object object, int size) {
this.object = object;
this.size = size;
}
}
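A small, hypothetical illustration of the wrapper (not part of this commit): the byte size is supplied once at construction and the read count is incremented by the cache on each successful get. The fixed size used here is an arbitrary example value.

package org.jivesoftware.util;

public class CacheObjectExample {

    // Illustrative sketch only; not part of this commit.
    public static void main(String[] args) {
        String value = "some cached value";
        // In DefaultCache the size would come from calculateSize(value);
        // 64 bytes is used here purely for illustration.
        CacheObject wrapper = new CacheObject(value, 64);
        wrapper.readCount++;
        System.out.println("object=" + wrapper.object
                + ", size=" + wrapper.size
                + ", reads=" + wrapper.readCount);
    }
}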
/**
* $RCSfile$
* $Revision$
* $Date$
*
* Copyright (C) 1999-2001 CoolServlets, Inc. All rights reserved.
*
* This software is the proprietary information of CoolServlets, Inc.
* Use is subject to license terms.
*/
package org.jivesoftware.util;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.util.*;
/**
* Default, non-distributed implementation of the Cache interface.
* The cache algorithm is as follows: a HashMap is maintained for fast
* object lookup. Two linked lists are maintained: one keeps objects in the
* order they are accessed from cache, the other keeps objects in the order
* they were originally added to cache. When objects are added to cache, they
* are first wrapped by a CacheObject which maintains the following pieces
* of information:<ul>
*
* <li> The size of the object (in bytes).
* <li> A pointer to the node in the linked list that maintains accessed
* order for the object. Keeping a reference to the node lets us avoid
* linear scans of the linked list.
* <li> A pointer to the node in the linked list that maintains the age
* of the object in cache. Keeping a reference to the node lets us avoid
* linear scans of the linked list.</ul><p>
*
* To get an object from cache, a hash lookup is performed to get a reference
* to the CacheObject that wraps the real object we are looking for.
* The object is subsequently moved to the front of the accessed linked list
* and any necessary cache cleanups are performed. Cache deletion and expiration
* is performed as needed.
*
* @author Matt Tucker
*/
public class DefaultCache implements Cache {
/**
* The map the keys and values are stored in.
*/
protected Map map;
/**
* Linked list maintaining the order in which cache objects are accessed,
* from most recently used to least recently used.
*/
protected org.jivesoftware.util.LinkedList lastAccessedList;
/**
* Linked list maintaining the time that cache objects were initially added
* to the cache, from most recently added to oldest.
*/
protected LinkedList ageList;
/**
* Maximum size in bytes that the cache can grow to.
*/
private int maxCacheSize;
/**
* Maintains the current size of the cache in bytes.
*/
private int cacheSize = 0;
/**
* Maximum length of time objects can exist in cache before expiring.
*/
protected long maxLifetime;
/**
* Maintain the number of cache hits and misses. A cache hit occurs every
* time the get method is called and the cache contains the requested
* object. A cache miss represents the opposite occurrence.<p>
*
* Keeping track of cache hits and misses lets one measure how efficient
* the cache is; the higher the percentage of hits, the more efficient.
*/
protected long cacheHits, cacheMisses = 0L;
/**
* The name of the cache.
*/
private String name;
/**
* Create a new cache and specify the maximum size for the cache in
* bytes, and the maximum lifetime of objects.
*
* @param name a name for the cache.
* @param maxSize the maximum size of the cache in bytes. -1 means the cache
* has no max size.
* @param maxLifetime the maximum amount of time objects can exist in
* cache before being deleted. -1 means objects never expire.
*/
public DefaultCache(String name, int maxSize, long maxLifetime) {
this.name = name;
this.maxCacheSize = maxSize;
this.maxLifetime = maxLifetime;
// Our primary data structure is a HashMap. The default capacity of 11
// is too small in almost all cases, so we set it bigger.
map = new HashMap(103);
lastAccessedList = new LinkedList();
ageList = new LinkedList();
}
public synchronized Object put(Object key, Object value) {
// Delete an old entry if it exists.
remove(key);
int objectSize = calculateSize(value);
// If the object is bigger than 90% of the entire cache, simply don't add it.
if (maxCacheSize > 0 && objectSize > maxCacheSize * .90) {
Log.warn("Cache: " + name + " -- object with key " + key +
" is too large to fit in cache. Size is " + objectSize);
return value;
}
cacheSize += objectSize;
CacheObject cacheObject = new CacheObject(value, objectSize);
map.put(key, cacheObject);
// Make an entry into the cache order list.
LinkedListNode lastAccessedNode = lastAccessedList.addFirst(key);
// Store the cache order list entry so that we can get back to it
// during later lookups.
cacheObject.lastAccessedListNode = lastAccessedNode;
// Add the object to the age list
LinkedListNode ageNode = ageList.addFirst(key);
// We make an explicit call to currentTimeMillis() so that total accuracy
// of lifetime calculations is better than one second.
ageNode.timestamp = System.currentTimeMillis();
cacheObject.ageListNode = ageNode;
// If cache is too full, remove least used cache entries until it is
// not too full.
cullCache();
return value;
}
public synchronized Object get(Object key) {
// First, clear all entries that have been in cache longer than the
// maximum defined age.
deleteExpiredEntries();
CacheObject cacheObject = (CacheObject)map.get(key);
if (cacheObject == null) {
// The object didn't exist in cache, so increment cache misses.
cacheMisses++;
return null;
}
// The object exists in cache, so increment cache hits. Also, increment
// the object's read count.
cacheHits++;
cacheObject.readCount++;
// Remove the object from its current place in the cache order list,
// and re-insert it at the front of the list.
cacheObject.lastAccessedListNode.remove();
lastAccessedList.addFirst(cacheObject.lastAccessedListNode);
return cacheObject.object;
}
public synchronized Object remove(Object key) {
CacheObject cacheObject = (CacheObject)map.get(key);
// If the object is not in cache, stop trying to remove it.
if (cacheObject == null) {
return null;
}
// remove from the hash map
map.remove(key);
// remove from the cache order list
cacheObject.lastAccessedListNode.remove();
cacheObject.ageListNode.remove();
// remove references to linked list nodes
cacheObject.ageListNode = null;
cacheObject.lastAccessedListNode = null;
// removed the object, so subtract its size from the total.
cacheSize -= cacheObject.size;
return cacheObject.object;
}
public synchronized void clear() {
Object[] keys = map.keySet().toArray();
for (int i = 0; i < keys.length; i++) {
remove(keys[i]);
}
// Now, reset all containers.
map.clear();
lastAccessedList.clear();
lastAccessedList = new LinkedList();
ageList.clear();
ageList = new LinkedList();
cacheSize = 0;
cacheHits = 0;
cacheMisses = 0;
}
public int size() {
// First, clear all entries that have been in cache longer than the
// maximum defined age.
deleteExpiredEntries();
return map.size();
}
public boolean isEmpty() {
// First, clear all entries that have been in cache longer than the
// maximum defined age.
deleteExpiredEntries();
return map.isEmpty();
}
public Collection values() {
// First, clear all entries that have been in cache longer than the
// maximum defined age.
deleteExpiredEntries();
Object[] cacheObjects = map.values().toArray();
Object[] values = new Object[cacheObjects.length];
for (int i = 0; i < cacheObjects.length; i++) {
values[i] = ((CacheObject)cacheObjects[i]).object;
}
return Collections.unmodifiableList(Arrays.asList(values));
}
public boolean containsKey(Object key) {
// First, clear all entries that have been in cache longer than the
// maximum defined age.
deleteExpiredEntries();
return map.containsKey(key);
}
public void putAll(Map map) {
for (Iterator i = map.keySet().iterator(); i.hasNext();) {
Object key = i.next();
Object value = map.get(key);
put(key, value);
}
}
public boolean containsValue(Object value) {
// First, clear all entries that have been in cache longer than the
// maximum defined age.
deleteExpiredEntries();
int objectSize = calculateSize(value);
CacheObject cacheObject = new CacheObject(value, objectSize);
return map.containsValue(cacheObject);
}
public Set entrySet() {
// First, clear all entries that have been in cache longer than the
// maximum defined age.
deleteExpiredEntries();
return Collections.unmodifiableSet(map.entrySet());
}
public String getName() {
return name;
}
public Set keySet() {
// First, clear all entries that have been in cache longer than the
// maximum defined age.
deleteExpiredEntries();
return Collections.unmodifiableSet(map.keySet());
}
public long getCacheHits() {
return cacheHits;
}
public long getCacheMisses() {
return cacheMisses;
}
public int getCacheSize() {
return cacheSize;
}
public int getMaxCacheSize() {
return maxCacheSize;
}
public void setMaxCacheSize(int maxCacheSize) {
this.maxCacheSize = maxCacheSize;
// It's possible that the new max size is smaller than our current cache
// size. If so, we need to delete infrequently used items.
cullCache();
}
public long getMaxLifetime() {
return maxLifetime;
}
public void setMaxLifetime(long maxLifetime) {
this.maxLifetime = maxLifetime;
}
/**
* Returns the size of an object in bytes. Determining size by serialization
* is only used as a last resort.
*
* @return the size of an object in bytes.
*/
private int calculateSize(Object object) {
// If the object is Cacheable, ask it its size.
if (object instanceof Cacheable) {
return ((Cacheable)object).getCachedSize();
}
// Check for other common types of objects put into cache.
else if (object instanceof Long) {
return CacheSizes.sizeOfLong();
}
else if (object instanceof Integer) {
return CacheSizes.sizeOfObject() + CacheSizes.sizeOfInt();
}
else if (object instanceof Boolean) {
return CacheSizes.sizeOfObject() + CacheSizes.sizeOfBoolean();
}
else if (object instanceof long[]) {
long[] array = (long[])object;
return CacheSizes.sizeOfObject() + array.length * CacheSizes.sizeOfLong();
}
// Default behavior -- serialize the object to determine its size.
else {
int size = 1;
try {
// Default to serializing the object out to determine size.
NullOutputStream out = new NullOutputStream();
ObjectOutputStream outObj = new ObjectOutputStream(out);
outObj.writeObject(object);
size = out.size();
}
catch (IOException ioe) {
Log.error(ioe);
}
return size;
}
}
/**
* Clears all entries out of cache where the entries are older than the
* maximum defined age.
*/
protected void deleteExpiredEntries() {
// Check if expiration is turned on.
if (maxLifetime <= 0) {
return;
}
// Remove all old entries. To do this, we remove objects from the end
// of the linked list until they are no longer too old. We get to avoid
// any hash lookups or looking at any more objects than is strictly
// necessary.
LinkedListNode node = ageList.getLast();
// If there are no entries in the age list, return.
if (node == null) {
return;
}
// Determine the expireTime, which is the moment in time that elements
// should expire from cache. Then, we can do an easy check to see
// whether a node's timestamp is older than the expire time.
long expireTime = System.currentTimeMillis() - maxLifetime;
while (expireTime > node.timestamp) {
// Remove the object
remove(node.object);
// Get the next node.
node = ageList.getLast();
// If there are no more entries in the age list, return.
if (node == null) {
return;
}
}
}
/**
* Removes objects from cache if the cache is too full. "Too full" is
* defined as within 3% of the maximum cache size. Whenever the cache
* is too big, the least frequently used elements are deleted until the
* cache is at least 10% empty.
*/
protected final void cullCache() {
// Check if a max cache size is defined.
if (maxCacheSize < 0) {
return;
}
// See if the cache size is within 3% of being too big. If so, clean out
// cache until it's 10% free.
if (cacheSize >= maxCacheSize * .97) {
// First, delete any old entries to see how much memory that frees.
deleteExpiredEntries();
int desiredSize = (int)(maxCacheSize * .90);
while (cacheSize > desiredSize) {
// Get the key and invoke the remove method on it.
remove(lastAccessedList.getLast().object);
}
}
}
/**
* An extension of OutputStream that does nothing but calculate the number
* of bytes written through it.
*/
private static class NullOutputStream extends OutputStream {
int size = 0;
public void write(int b) throws IOException {
size++;
}
public void write(byte[] b) throws IOException {
size += b.length;
}
public void write(byte[] b, int off, int len) {
size += len;
}
/**
* Returns the number of bytes written out through the stream.
*
* @return the number of bytes written to the stream.
*/
public int size() {
return size;
}
}
}
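A brief, hypothetical usage sketch (not part of this commit) showing the constructor and Map-style calls defined above; the cache name, size, lifetime, and keys are arbitrary example values.

package org.jivesoftware.util;

public class DefaultCacheExample {

    // Illustrative sketch only; not part of this commit.
    public static void main(String[] args) {
        // A 512 KB cache whose entries expire after ten minutes.
        DefaultCache cache = new DefaultCache("exampleCache",
                512 * 1024,        // max size in bytes
                10 * 60 * 1000L);  // max lifetime in milliseconds

        cache.put("user.1", "some cached value");

        // A hit: the entry moves to the front of the last-accessed list.
        Object found = cache.get("user.1");
        // A miss: the miss counter is incremented and null is returned.
        Object missing = cache.get("user.2");

        System.out.println(found + " / " + missing
                + ", hits=" + cache.getCacheHits()
                + ", misses=" + cache.getCacheMisses());
    }
}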
/**
* $RCSfile$
* $Revision$
* $Date$
*
* This class was adapted directly from the Colt sources by CoolServlets Inc.
* The changes involved modifying the code so that the functionality could be
* encapsulated in a single class file.
*
* As such, the original copyright is left intact and this file is distributed
* under the original Colt license as seen below. Please visit the Colt
* homepage for more information about the excellent package:
* http://tilde-hoschek.home.cern.ch/~hoschek/colt/index.htm
* ---------------------------------------------------------------------------
* Copyright © 1999 CERN - European Organization for Nuclear Research.
* Permission to use, copy, modify, distribute and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation. CERN
* makes no representations about the suitability of this software for any
* purpose. It is provided "as is" without expressed or implied warranty.
*/
package org.jivesoftware.util;
/**
* Hash map holding (key,value) associations of type <tt>(long-->Object)</tt>;
* Automatically grows and shrinks as needed; Implemented using open addressing
* with double hashing.<p>
* <p/>
* Adapted from the Colt package by CoolServlets. Please visit the Colt
* homepage at: http://tilde-hoschek.home.cern.ch/~hoschek/colt/index.htm
*
* @author wolfgang.hoschek@cern.ch
*/
public final class LongHashMap {
//The hash table keys.
protected long table[];
//The hash table values.
protected Object values[];
//The state of each hash table entry (FREE, FULL, REMOVED).
protected byte state[];
//The number of table entries in state==FREE.
protected int freeEntries;
//The number of distinct associations in the map; its "size()".
protected int distinct;
/**
* The table capacity c=table.length always satisfies the invariant
* <tt>c * minLoadFactor <= s <= c * maxLoadFactor</tt>, where s=size() is
* the number of associations currently contained. The term
* "c * minLoadFactor" is called the "lowWaterMark", "c * maxLoadFactor" is
* called the "highWaterMark". In other words, the table capacity (and
* proportionally the memory used by this class) oscillates within these
* constraints. The terms are precomputed and cached to avoid recalculating
* them each time put(..) or removeKey(...) is called.
*/
protected int lowWaterMark;
protected int highWaterMark;
//The minimum load factor for the hashtable.
protected double minLoadFactor;
//The maximum load factor for the hashtable.
protected double maxLoadFactor;
protected static final int DEFAULT_CAPACITY = 277;
protected static final double DEFAULT_MIN_LOAD_FACTOR = 0.2;
protected static final double DEFAULT_MAX_LOAD_FACTOR = 0.6;
protected static final byte FREE = 0;
protected static final byte FULL = 1;
protected static final byte REMOVED = 2;
/**
* Constructs an empty map with default capacity and default load factors.
*/
public LongHashMap() {
this(DEFAULT_CAPACITY);
}
/**
* Constructs an empty map with the specified initial capacity and default
* load factors.
*
* @param initialCapacity the initial capacity of the map.
* @throws IllegalArgumentException if the initial capacity is less
* than zero.
*/
public LongHashMap(int initialCapacity) {
this(initialCapacity, DEFAULT_MIN_LOAD_FACTOR, DEFAULT_MAX_LOAD_FACTOR);
}
/**
* Constructs an empty map with the specified initial capacity and the
* specified minimum and maximum load factor.
*
* @param initialCapacity the initial capacity.
* @param minLoadFactor the minimum load factor.
* @param maxLoadFactor the maximum load factor.
* @throws IllegalArgumentException if <tt>initialCapacity < 0 ||
* (minLoadFactor < 0.0 || minLoadFactor >= 1.0) ||
* (maxLoadFactor <= 0.0 || maxLoadFactor >= 1.0) ||
* (minLoadFactor >= maxLoadFactor)</tt>.
*/
public LongHashMap(int initialCapacity, double minLoadFactor, double maxLoadFactor) {
setUp(initialCapacity, minLoadFactor, maxLoadFactor);
}
/**
* Removes all (key,value) associations from the receiver.
* Implicitly calls <tt>trimToSize()</tt>.
*/
public void clear() {
for (int i = 0; i < state.length; i++) {
state[i] = FREE;
}
for (int i = 0; i < values.length; i++) {
values[i] = null;
}
this.distinct = 0;
this.freeEntries = table.length; // delta
trimToSize();
}
/**
* Returns <tt>true</tt> if the receiver contains the specified key.
*
* @return <tt>true</tt> if the receiver contains the specified key.
*/
public boolean containsKey(long key) {
return indexOfKey(key) >= 0;
}
/**
* Returns <tt>true</tt> if the receiver contains the specified value.
*
* @return <tt>true</tt> if the receiver contains the specified value.
*/
public boolean containsValue(Object value) {
return indexOfValue(value) >= 0;
}
/**
* Ensures that the receiver can hold at least the specified number of
* associations without needing to allocate new internal memory. If
* necessary, allocates new internal memory and increases the capacity of
* the receiver.<p>
* <p/>
* This method never need be called; it is for performance tuning only.
* Calling this method before <tt>put()</tt>ing a large number of
* associations boosts performance, because the receiver will grow only
* once instead of potentially many times and hash collisions get less
* probable.
*
* @param minCapacity the desired minimum capacity.
*/
public void ensureCapacity(int minCapacity) {
if (table.length < minCapacity) {
int newCapacity = nextPrime(minCapacity);
rehash(newCapacity);
}
}
/**
* Returns the value associated with the specified key.
* It is often a good idea to first check with {@link #containsKey(long)}
* whether the given key has a value associated or not, i.e. whether there
* exists an association for the given key or not.
*
* @param key the key to be searched for.
* @return the value associated with the specified key; <tt>null</tt> if no
* such key is present.
*/
public final Object get(long key) {
int i = indexOfKey(key);
//If not in the map return null
if (i < 0) {
return null;
}
else {
return values[i];
}
}
/**
* Returns the index where the key would need to be inserted, if it is not
* already contained. Returns -index-1 if the key is already contained
* at slot index. Therefore, if the returned index < 0, then it is
* already contained at slot -index-1. If the returned index >= 0,
* then it is NOT already contained and should be inserted at slot index.
*
* @param key the key to be added to the receiver.
* @return the index where the key would need to be inserted.
*/
private final int indexOfInsertion(long key) {
final long tab[] = table;
final byte stat[] = state;
final int length = tab.length;
final int hash = ((int)(key ^ (key >> 32))) & 0x7FFFFFFF;
int i = hash % length;
// double hashing, see http://www.eece.unm.edu/faculty/heileman/hash/node4.html
int decrement = (hash) % (length - 2);
//OLD CODE: int decrement = (hash / length) % length;
if (decrement == 0) decrement = 1;
// stop if we find a removed or free slot, or if we find the key itself
// do NOT skip over removed slots (yes, open addressing is like that...)
while (stat[i] == FULL && tab[i] != key) {
i -= decrement;
//hashCollisions++;
if (i < 0) i += length;
}
if (stat[i] == REMOVED) {
// stop if we find a free slot, or if we find the key itself.
// do skip over removed slots (yes, open addressing is like that...)
// assertion: there is at least one FREE slot.
int j = i;
while (stat[i] != FREE && (stat[i] == REMOVED || tab[i] != key)) {
i -= decrement;
//hashCollisions++;
if (i < 0) i += length;
}
if (stat[i] == FREE) i = j;
}
if (stat[i] == FULL) {
// key already contained at slot i.
// return a negative number identifying the slot.
return -i - 1;
}
// not already contained, should be inserted at slot i.
// return a number >= 0 identifying the slot.
return i;
}
/**
* @param key the key to be searched in the receiver.
* @return the index where the key is contained in the receiver, returns -1
* if the key was not found.
*/
private final int indexOfKey(long key) {
final long tab[] = table;
final byte stat[] = state;
final int length = tab.length;
final int hash = ((int)(key ^ (key >> 32))) & 0x7FFFFFFF;
int i = hash % length;
// double hashing, see http://www.eece.unm.edu/faculty/heileman/hash/node4.html
int decrement = (hash) % (length - 2);
//OLD CODE: int decrement = (hash / length) % length;
if (decrement == 0) decrement = 1;
// stop if we find a free slot, or if we find the key itself.
// do skip over removed slots (yes, open addressing is like that...)
while (stat[i] != FREE && (stat[i] == REMOVED || tab[i] != key)) {
i -= decrement;
//hashCollisions++;
if (i < 0) i += length;
}
if (stat[i] == FREE) return -1; // not found
return i; //found, return index where key is contained
}
/**
* @param value the value to be searched in the receiver.
* @return the index where the value is contained in the receiver,
* returns -1 if the value was not found.
*/
protected int indexOfValue(Object value) {
final Object val[] = values;
final byte stat[] = state;
for (int i = stat.length; --i >= 0;) {
if (stat[i] == FULL && val[i] == value) return i;
}
return -1; // not found
}
/**
* Returns the first key the given value is associated with.
*
* @param value the value to search for.
* @return the first key for which holds <tt>get(key) == value</tt>;
* returns <tt>Long.MIN_VALUE</tt> if no such key exists.
*/
public long keyOf(Object value) {
//returns the first key found; there may be more matching keys, however.
int i = indexOfValue(value);
if (i < 0) return Long.MIN_VALUE;
return table[i];
}
/**
* Returns all the keys in the map.
*/
public long[] keys() {
long[] elements = new long[distinct];
long[] tab = table;
byte[] stat = state;
int j = 0;
for (int i = tab.length; i-- > 0;) {
if (stat[i] == FULL) {
elements[j++] = tab[i];
}
}
return elements;
}
/**
* Associates the given key with the given value. Replaces any old
* <tt>(key,someOtherValue)</tt> association, if existing.
*
* @param key the key the value shall be associated with.
* @param value the value to be associated.
* @return <tt>true</tt> if the receiver did not already contain such a key;
* <tt>false</tt> if the receiver did already contain such a key - the
* new value has now replaced the formerly associated value.
*/
public boolean put(long key, Object value) {
int i = indexOfInsertion(key);
if (i < 0) { //already contained
i = -i - 1;
this.values[i] = value;
return false;
}
if (this.distinct > this.highWaterMark) {
int newCapacity = chooseGrowCapacity(this.distinct + 1,
this.minLoadFactor,
this.maxLoadFactor);
rehash(newCapacity);
return put(key, value);
}
this.table[i] = key;
this.values[i] = value;
if (this.state[i] == FREE) this.freeEntries--;
this.state[i] = FULL;
this.distinct++;
if (this.freeEntries < 1) { //delta
int newCapacity = chooseGrowCapacity(this.distinct + 1,
this.minLoadFactor,
this.maxLoadFactor);
rehash(newCapacity);
}
return true;
}
/**
* Returns the number of (key,value) associations currently contained.
*
* @return the number of (key,value) associations currently contained.
*/
public int size() {
return distinct;
}
/**
* Returns <tt>true</tt> if the receiver contains no (key,value) associations.
*
* @return <tt>true</tt> if the receiver contains no (key,value) associations.
*/
public boolean isEmpty() {
return distinct == 0;
}
/**
* Rehashes the contents of the receiver into a new table
* with a smaller or larger capacity.
* This method is called automatically when the
* number of keys in the receiver exceeds the high water mark or falls
* below the low water mark.
*/
protected void rehash(int newCapacity) {
int oldCapacity = table.length;
//if (oldCapacity == newCapacity) return;
long oldTable[] = table;
Object oldValues[] = values;
byte oldState[] = state;
long newTable[] = new long[newCapacity];
Object newValues[] = new Object[newCapacity];
byte newState[] = new byte[newCapacity];
this.lowWaterMark = chooseLowWaterMark(newCapacity, this.minLoadFactor);
this.highWaterMark = chooseHighWaterMark(newCapacity, this.maxLoadFactor);
this.table = newTable;
this.values = newValues;
this.state = newState;
this.freeEntries = newCapacity - this.distinct; // delta
for (int i = oldCapacity; i-- > 0;) {
if (oldState[i] == FULL) {
long element = oldTable[i];
int index = indexOfInsertion(element);
newTable[index] = element;
newValues[index] = oldValues[i];
newState[index] = FULL;
}
}
}
/**
* Removes the given key with its associated element from the receiver, if
* present.
*
* @param key the key to be removed from the receiver.
* @return <tt>true</tt> if the receiver contained the specified key,
* <tt>false</tt> otherwise.
*/
public boolean removeKey(long key) {
int i = indexOfKey(key);
if (i < 0) return false; // key not contained
this.state[i] = REMOVED;
this.values[i] = null; // delta
this.distinct--;
if (this.distinct < this.lowWaterMark) {
int newCapacity = chooseShrinkCapacity(this.distinct,
this.minLoadFactor,
this.maxLoadFactor);
rehash(newCapacity);
}
return true;
}
/**
* Initializes the receiver.
*
* @param initialCapacity the initial capacity of the receiver.
* @param minLoadFactor the minLoadFactor of the receiver.
* @param maxLoadFactor the maxLoadFactor of the receiver.
* @throws IllegalArgumentException if <tt>initialCapacity < 0 ||
* (minLoadFactor < 0.0 || minLoadFactor >= 1.0) ||
* (maxLoadFactor <= 0.0 || maxLoadFactor >= 1.0) ||
* (minLoadFactor >= maxLoadFactor)</tt>.
*/
protected void setUp(int initialCapacity, double minLoadFactor, double maxLoadFactor) {
if (initialCapacity < 0) {
throw new IllegalArgumentException("Initial Capacity must not be less than zero: " + initialCapacity);
}
if (minLoadFactor < 0.0 || minLoadFactor >= 1.0) {
throw new IllegalArgumentException("Illegal minLoadFactor: " + minLoadFactor);
}
if (maxLoadFactor <= 0.0 || maxLoadFactor >= 1.0) {
throw new IllegalArgumentException("Illegal maxLoadFactor: " + maxLoadFactor);
}
if (minLoadFactor >= maxLoadFactor) {
throw new IllegalArgumentException("Illegal minLoadFactor: " + minLoadFactor +
" and maxLoadFactor: " + maxLoadFactor);
}
int capacity = initialCapacity;
capacity = nextPrime(capacity);
// open addressing needs at least one FREE slot at any time.
if (capacity == 0) {
capacity = 1;
}
this.table = new long[capacity];
this.values = new Object[capacity];
this.state = new byte[capacity];
//memory will be exhausted long before this pathological case happens, anyway.
this.minLoadFactor = minLoadFactor;
if (capacity == LARGEST_PRIME)
this.maxLoadFactor = 1.0;
else
this.maxLoadFactor = maxLoadFactor;
this.distinct = 0;
this.freeEntries = capacity; // delta
/**
* lowWaterMark will be established upon first expansion. Establishing
* it now (upon instance construction) would immediately make the table
* shrink upon first put(...). After all the idea of an
* "initialCapacity" implies violating lowWaterMarks when an object is
* young. See ensureCapacity(...)
*/
this.lowWaterMark = 0;
this.highWaterMark = chooseHighWaterMark(capacity, this.maxLoadFactor);
}
/**
* Trims the capacity of the receiver to be the receiver's current size.
* Releases any superfluous internal memory. An application can use this
* operation to minimize the storage of the receiver.
*/
public void trimToSize() {
//*1.2 because open addressing's performance exponentially degrades
//beyond that point so that even rehashing the table can take very long
int newCapacity = nextPrime((int)(1 + 1.2 * size()));
if (table.length > newCapacity) {
rehash(newCapacity);
}
}
/**
* Returns an array of all the values in the Map.
*/
public Object[] values() {
Object[] elements = new Object[distinct];
Object[] val = values;
byte[] stat = state;
int j = 0;
for (int i = stat.length; i-- > 0;) {
if (stat[i] == FULL) {
elements[j++] = val[i];
}
}
return elements;
}
/**
* Chooses a new prime table capacity optimized for growing that
* (approximately) satisfies the invariant
* <tt>c * minLoadFactor <= size <= c * maxLoadFactor</tt>
* and has at least one FREE slot for the given size.
*/
private int chooseGrowCapacity(int size, double minLoad, double maxLoad) {
return nextPrime(Math.max(size + 1, (int)((4 * size / (3 * minLoad + maxLoad)))));
}
/**
* Returns new high water mark threshold based on current capacity and
* maxLoadFactor.
*
* @return int the new threshold.
*/
private int chooseHighWaterMark(int capacity, double maxLoad) {
//makes sure there is always at least one FREE slot
return Math.min(capacity - 2, (int)(capacity * maxLoad));
}
/**
* Returns new low water mark threshold based on current capacity and minLoadFactor.
*
* @return int the new threshold.
*/
protected int chooseLowWaterMark(int capacity, double minLoad) {
return (int)(capacity * minLoad);
}
/**
* Chooses a new prime table capacity neither favoring shrinking nor growing,
* that (approximately) satisfies the invariant
* <tt>c * minLoadFactor <= size <= c * maxLoadFactor</tt>
* and has at least one FREE slot for the given size.
*/
protected int chooseMeanCapacity(int size, double minLoad, double maxLoad) {
return nextPrime(Math.max(size + 1, (int)((2 * size / (minLoad + maxLoad)))));
}
/**
* Chooses a new prime table capacity optimized for shrinking that
* (approximately) satisfies the invariant
* <tt>c * minLoadFactor <= size <= c * maxLoadFactor</tt>
* and has at least one FREE slot for the given size.
*/
protected int chooseShrinkCapacity(int size, double minLoad, double maxLoad) {
return nextPrime(Math.max(size + 1, (int)((4 * size / (minLoad + 3 * maxLoad)))));
}
/**
* Returns a prime number which is <code>&gt;= desiredCapacity</code> and
* very close to <code>desiredCapacity</code> (within 11% if
* <code>desiredCapacity &gt;= 1000</code>).
*
* @param desiredCapacity the capacity desired by the user.
* @return the capacity which should be used for a hashtable.
*/
protected int nextPrime(int desiredCapacity) {
int i = java.util.Arrays.binarySearch(primeCapacities, desiredCapacity);
if (i < 0) {
//desired capacity not found, choose next prime greater than desired
//capacity
i = -i - 1; // remember the semantics of binarySearch...
}
return primeCapacities[i];
}
/**
* The largest prime this class can generate; currently equal to <tt>Integer.MAX_VALUE</tt>.
*/
public static final int LARGEST_PRIME = Integer.MAX_VALUE; //yes, it is prime.
/**
* The prime number list consists of 11 chunks. Each chunk contains prime
* numbers. A chunk starts with a prime P1. The next element is a prime P2.
* P2 is the smallest prime for which holds: P2 >= 2*P1. The next element
* is P3, for which the same holds with respect to P2, and so on.<p>
* <p/>
* Chunks are chosen such that for any desired capacity >= 1000 the list
* includes a prime number <= desired capacity * 1.11. Therefore, primes
* can be retrieved which are quite close to any desired capacity, which in
* turn avoids wasting memory. For example, the list includes 1039,1117,
* 1201,1277,1361,1439,1523,1597,1759,1907,2081. So if you need a
* prime >= 1040, you will find a prime <= 1040*1.11=1154.<p>
* <p/>
* Chunks are chosen such that they are optimized for a hashtable
* growthfactor of 2.0; If your hashtable has such a growthfactor then,
* after initially "rounding to a prime" upon hashtable construction,
* it will later expand to prime capacities such that there exist no better
* primes.<p>
* <p/>
* In total these are about 32*10=320 numbers -> 1 KB of static memory
* needed. If you are stingy, then delete every second or fourth chunk.
*/
private static final int[] primeCapacities = {
//chunk #0
LARGEST_PRIME,
//chunk #1
5, 11, 23, 47, 97, 197, 397, 797, 1597, 3203, 6421, 12853, 25717, 51437, 102877, 205759,
411527, 823117, 1646237, 3292489, 6584983, 13169977, 26339969, 52679969, 105359939,
210719881, 421439783, 842879579, 1685759167,
//chunk #2
433, 877, 1759, 3527, 7057, 14143, 28289, 56591, 113189, 226379, 452759, 905551, 1811107,
3622219, 7244441, 14488931, 28977863, 57955739, 115911563, 231823147, 463646329, 927292699,
1854585413,
//chunk #3
953, 1907, 3821, 7643, 15287, 30577, 61169, 122347, 244703, 489407, 978821, 1957651, 3915341,
7830701, 15661423, 31322867, 62645741, 125291483, 250582987, 501165979, 1002331963,
2004663929,
//chunk #4
1039, 2081, 4177, 8363, 16729, 33461, 66923, 133853, 267713, 535481, 1070981, 2141977, 4283963,
8567929, 17135863, 34271747, 68543509, 137087021, 274174111, 548348231, 1096696463,
//chunk #5
31, 67, 137, 277, 557, 1117, 2237, 4481, 8963, 17929, 35863, 71741, 143483, 286973, 573953,
1147921, 2295859, 4591721, 9183457, 18366923, 36733847, 73467739, 146935499, 293871013,
587742049, 1175484103,
//chunk #6
599, 1201, 2411, 4831, 9677, 19373, 38747, 77509, 155027, 310081, 620171, 1240361, 2480729,
4961459, 9922933, 19845871, 39691759, 79383533, 158767069, 317534141, 635068283, 1270136683,
//chunk #7
311, 631, 1277, 2557, 5119, 10243, 20507, 41017, 82037, 164089, 328213, 656429, 1312867,
2625761, 5251529, 10503061, 21006137, 42012281, 84024581, 168049163, 336098327, 672196673,
1344393353,
//chunk #8
3, 7, 17, 37, 79, 163, 331, 673, 1361, 2729, 5471, 10949, 21911, 43853, 87719, 175447, 350899,
701819, 1403641, 2807303, 5614657, 11229331, 22458671, 44917381, 89834777, 179669557,
359339171, 718678369, 1437356741,
//chunk #9
43, 89, 179, 359, 719, 1439, 2879, 5779, 11579, 23159, 46327, 92657, 185323, 370661, 741337,
1482707, 2965421, 5930887, 11861791, 23723597, 47447201, 94894427, 189788857, 379577741,
759155483, 1518310967,
//chunk #10
379, 761, 1523, 3049, 6101, 12203, 24407, 48817, 97649, 195311, 390647, 781301, 1562611,
3125257, 6250537, 12501169, 25002389, 50004791, 100009607, 200019221, 400038451, 800076929,
1600153859
};
static { //initializer
// The above prime numbers are formatted for human readability.
// To find numbers fast, we sort them once and for all.
java.util.Arrays.sort(primeCapacities);
}
}
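A hypothetical usage sketch (not part of this commit) for the primitive-keyed map above; the keys and values are arbitrary example data.

package org.jivesoftware.util;

public class LongHashMapExample {

    // Illustrative sketch only; not part of this commit.
    public static void main(String[] args) {
        LongHashMap map = new LongHashMap();
        map.put(10001L, "session for user 10001");
        map.put(10002L, "session for user 10002");

        // Lookup by primitive long key; null is returned when the key is absent.
        System.out.println(map.get(10001L) + ", size=" + map.size());

        // Removing a key marks its slot REMOVED and may trigger a shrinking
        // rehash once the size falls below the low water mark.
        map.removeKey(10002L);
        System.out.println("contains 10002? " + map.containsKey(10002L));
    }
}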
/*
* @(#)URLUtils.java
*
* Copyright 2003-2004 by Jive Software,
* 135 W 29th St, Suite 802, New York, NY 10001, U.S.A.
* All rights reserved.
*
* This software is the confidential and proprietary information
* of Jive Software.
*/
package org.jivesoftware.util;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.net.URLConnection;
/**
* The <code>URLUtils</code> class provides utility methods for working with URLs.
*
* @author Derek DeMoro
* @version 1.0, 04/21/2004
*/
public class URLUtils {
private URLUtils() {
}
/**
* Copies a given InputStream to a given OutputStream.
*/
private static void copy(InputStream in, OutputStream out) throws IOException {
final byte[] buffer = new byte[4096];
while (true) {
final int bytesRead = in.read(buffer);
if (bytesRead < 0) {
break;
}
out.write(buffer, 0, bytesRead);
}
}
/**
* Returns the suffix (if any) of a url.
*
* @param url the url to retrieve the suffix from.
* @return the suffix of the given url, or an empty String if no suffix is found.
*/
public static String getSuffix(URL url) {
final String path = url.getPath();
int lastDot = path.lastIndexOf('.');
return (lastDot >= 0) ? path.substring(lastDot) : "";
}
/**
* Copies the contents at <CODE>source</CODE> to <CODE>destination</CODE>.
*/
public static void copyURL(URL source, File destination) throws IOException {
InputStream in = null;
OutputStream out = null;
try {
// Ensure the parent directories of the destination exist before
// opening the output stream.
final File parent = destination.getParentFile();
if (parent != null) {
parent.mkdirs();
}
in = source.openStream();
out = new FileOutputStream(destination);
copy(in, out);
}
finally {
try {
if (in != null) in.close();
if (out != null) out.close();
}
catch (IOException e) {
}
}
}
/**
* Returns the canonical form of the <code>URL</code>.
*
* @param url the url to retrieve the canonical form from.
* @return the canonical form of a url.
*/
public URL canonicalize(URL url) {
return url;
}
/**
* Checks to see if the URL can be read.
*
* @param url the url to read from.
* @return true if the URL can be read, false otherwise.
*/
public boolean canRead(URL url) {
try {
final URLConnection urlConnection = url.openConnection();
return urlConnection.getDoInput();
}
catch (Exception e) {
return false;
}
}
/**
* Checks to see if the URL can be written to.
*
* @param url the url to write to.
* @return true if the url can be written to.
*/
public boolean canWrite(URL url) {
try {
final URLConnection urlConnection = url.openConnection();
return urlConnection.getDoOutput();
}
catch (Exception e) {
return false;
}
}
/**
* Checks to see if the resource at the given url can be created.
*
* @param url the url to check if creation is possible.
* @return true if the resource can be created.
*/
public boolean canCreate(URL url) {
return true;
}
/**
* Tests to see if the URL is valid.
*
* @param url the url to test.
* @return true if the url is valid.
*/
public boolean isValid(URL url) {
if (exists(url)) {
return true;
}
return canCreate(url);
}
/**
* Tests to see if the resource at the given <code>URL</code> exists.
*
* @param url the url to check.
* @return true if the resource at the given <code>URL</code> exists.
*/
public static boolean exists(URL url) {
return toFile(url).exists();
}
/**
* Creates directory(s) at the given <code>URL</code>
*
* @param url the url where the directory(s) should be made.
* @return true if the directory(s) were created.
*/
public static boolean mkdirs(URL url) {
final File file = toFile(url);
if (!file.exists()) {
return file.mkdirs();
}
return true;
}
/**
* Returns the name of the resource at a given <code>URL</code>
*
* @param url the url.
* @return the filename of the url.
*/
public static String getFileName(URL url) {
if (url == null) {
return "";
}
final String path = url.getPath();
if (path.equals("/")) {
return "/";
}
final int lastSep = path.lastIndexOf('/');
if (lastSep == path.length() - 1) {
final int lastSep2 = path.lastIndexOf('/', lastSep - 1);
return path.substring(lastSep2 + 1, lastSep);
}
else {
return path.substring(lastSep + 1);
}
}
/**
* Returns the number of bytes in the resource identified by
* the given <code>URL</code>
*
* @param url the url of the resource.
* @return the length in bytes of the resource.
*/
public long getLength(URL url) {
try {
final URLConnection urlConnection = url.openConnection();
return urlConnection.getContentLength();
}
catch (Exception e) {
return -1;
}
}
/**
* Creates a valid path by converting file separators to forward slashes.
*/
public static String createValidPath(String path) {
if (File.separatorChar != '/') {
path = path.replace(File.separatorChar, '/');
}
if (!path.startsWith("/")) {
path = "/" + path;
}
return path;
}
public static final File toFile(URL url) {
final String path = url.getPath();
final File file = new File(path);
return file;
}
public static URL getParent(URL url) {
final File file = toFile(url);
final File parentFile = file.getParentFile();
if (parentFile != null && !file.equals(parentFile)) {
try {
return parentFile.toURL();
}
catch (Exception ex) {
return null;
}
}
return null;
}
}
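A short, hypothetical sketch (not part of this commit) exercising the static helpers above; the URL and destination path are placeholders.

package org.jivesoftware.util;

import java.io.File;
import java.net.URL;

public class URLUtilsExample {

    // Illustrative sketch only; not part of this commit.
    public static void main(String[] args) throws Exception {
        URL source = new URL("http://www.example.com/files/archive.zip");

        System.out.println("file name: " + URLUtils.getFileName(source)); // archive.zip
        System.out.println("suffix: " + URLUtils.getSuffix(source));      // .zip

        // Copies the remote contents to a local file.
        URLUtils.copyURL(source, new File("/tmp/archive.zip"));
    }
}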
package org.jivesoftware.util;
import java.beans.XMLDecoder;
import java.beans.XMLEncoder;
import java.io.*;
import java.net.URL;
import java.net.URLDecoder;
import java.net.URLEncoder;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.w3c.dom.Document;
public class XMLUtils {
/**
* Handles reading and writing objects as XML via URLs.
*/
final public static Object getObject(URL url) throws Exception {
InputStream stream = url.openStream();
XMLDecoder d = new XMLDecoder(stream);
Object result = d.readObject();
d.close();
stream.close();
return result;
}
final public static void writeObject(Object document, URL url) throws Exception {
final OutputStream out = openOutputStream(url);
XMLEncoder e = new XMLEncoder(out);
e.writeObject(document);
e.close();
}
/**
* Handles reading and writing objects via InputStreams and OutputStreams.
*/
final public static Object getObject(InputStream stream) throws Exception {
XMLDecoder d = new XMLDecoder(stream);
Object result = d.readObject();
d.close();
stream.close();
return result;
}
final public static Object getObject(String objectStr) throws Exception {
final ByteArrayInputStream stream = new ByteArrayInputStream(objectStr.getBytes("UTF-8"));
XMLDecoder d = new XMLDecoder(stream);
Object result = d.readObject();
d.close();
stream.close();
return result;
}
final public static void writeObject(Object obj, OutputStream stream) throws Exception {
XMLEncoder e = new XMLEncoder(stream);
e.writeObject(obj);
e.close();
}
/**
* Handles reading and writing objects via Files.
*/
final public static Object getObject(File file) throws Exception {
XMLDecoder d = new XMLDecoder(new BufferedInputStream(new FileInputStream(file)));
Object result = d.readObject();
d.close();
return result;
}
final public static void writeObject(Object obj, File file) throws Exception {
XMLEncoder e = new XMLEncoder(new BufferedOutputStream(new FileOutputStream(file)));
e.writeObject(obj);
e.close();
}
final public static String toDocument(Object obj) throws Exception {
ByteArrayOutputStream s = new ByteArrayOutputStream();
XMLEncoder e = new XMLEncoder(s);
e.writeObject(obj);
e.close();
String returnStr = s.toString();
s.close();
return returnStr;
}
public static String toString(Document xmlDocument) {
try {
final StringWriter stringWriter = new StringWriter();
final PrintWriter printWriter = new PrintWriter(stringWriter);
TransformerFactory tFactory = TransformerFactory.newInstance();
Transformer transformer = tFactory.newTransformer();
DOMSource source = new DOMSource(xmlDocument);
StreamResult result = new StreamResult(printWriter);
transformer.transform(source, result);
stringWriter.close();
return stringWriter.toString();
}
catch (Exception e) {
e.printStackTrace();
}
return "";
}
public static OutputStream openOutputStream(URL url)
throws IOException {
final String path = url.getPath();
try {
return new FileOutputStream(path);
}
catch (FileNotFoundException e) {
final File file = url2File(url);
final File dir = file.getParentFile();
dir.mkdirs();
return new FileOutputStream(path);
}
}
private static final File url2File(URL url) {
final String path = url.getPath();
final File file = new File(path);
return file;
}
public final static String getEncodedObject(Object o) {
try {
String value = toDocument(o);
value = URLEncoder.encode(value, "UTF-8");
return value;
}
catch (Exception ex) {
ex.printStackTrace();
}
return null;
}
public final static Object getDecodedObject(String object) {
try {
object = URLDecoder.decode(object, "UTF-8");
return getObject(object);
}
catch (Exception ex) {
ex.printStackTrace();
}
return null;
}
}
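A hypothetical round trip (not part of this commit) through the helpers above: a JavaBean-style object is serialized to XML with java.beans.XMLEncoder and read back with XMLDecoder; the file path is a placeholder.

package org.jivesoftware.util;

import java.io.File;
import java.util.HashMap;

public class XMLUtilsExample {

    // Illustrative sketch only; not part of this commit.
    public static void main(String[] args) throws Exception {
        HashMap settings = new HashMap();
        settings.put("port", new Integer(5222));

        File file = new File("/tmp/settings.xml");
        XMLUtils.writeObject(settings, file);

        HashMap restored = (HashMap)XMLUtils.getObject(file);
        System.out.println("restored port: " + restored.get("port"));
    }
}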