Commit 649b1efc authored by Tom Evans, committed by tevans

OF-699/OF-729: Upgrade Hazelcast to latest release (3.1.5); fix initialization timing issue

git-svn-id: http://svn.igniterealtime.org/svn/repos/openfire/trunk@13928 b35dd754-fafc-0310-a699-88a17e54d16e
parent 32a70212
@@ -44,6 +44,13 @@
Hazelcast Clustering Plugin Changelog
</h1>
<p><b>1.2.0</b> -- February 10, 2014</p>
<p>Miscellaneous enhancements:</p>
<ul>
<li>Fix cluster initialization logic (<a href="http://issues.igniterealtime.org/browse/OF-699">OF-699</a>)</li>
<li>Updated Hazelcast to release 3.1.5 (<a href="http://www.hazelcast.org/docs/3.1/manual/html-single/#WhatsNew31">what's new</a>).</li>
</ul>
<p><b>1.1.0</b> -- September 13, 2013</p>
<ul>
<li>Requires Openfire 3.9.0.</li>
@@ -54,7 +61,7 @@ Hazelcast Clustering Plugin Changelog
<ul>
<li>Added support for cluster time (<a href="http://issues.igniterealtime.org/browse/OF-666">OF-666</a>)</li>
<li>Added <code>hazelcast-cloud.jar</code> to support AWS deployments (<a href="http://community.igniterealtime.org/blogs/ignite/2012/09/23/introducing-hazelcast-a-new-way-to-cluster-openfire#comment-8027">more info</a>).</li>
<li>Updated Hazelcast to release 2.5.1 (<a href="http://www.hazelcast.com/docs/2.5/manual/single_html/#ReleaseNotes">bug fixes</a>).</li>
<li>Updated Hazelcast to release 2.5.1 (<a href="http://www.hazelcast.org/docs/2.5/manual/single_html/#ReleaseNotes">bug fixes</a>).</li>
</ul>
<p><b>1.0.5</b> -- March 26, 2013</p>
@@ -137,7 +137,7 @@
Any integer between 0 and Integer.MAX_VALUE. 0 means
Integer.MAX_VALUE. Default is 0.
-->
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<!--
When max. size is reached, specified percentage of
the map will be evicted. Any integer between 0 and 100.
@@ -250,13 +250,13 @@
<map name="POP3 Authentication">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">10000</max-size>
<max-size policy="per_partition">10000</max-size>
<time-to-live-seconds>3600</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
<map name="LDAP Authentication">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">10000</max-size>
<max-size policy="per_partition">10000</max-size>
<time-to-live-seconds>7200</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
@@ -267,7 +267,7 @@
</map>
<map name="File Transfer Cache">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">10000</max-size>
<max-size policy="per_partition">10000</max-size>
<time-to-live-seconds>600</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
@@ -278,7 +278,7 @@
</map>
<map name="Javascript Cache">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">10000</max-size>
<max-size policy="per_partition">10000</max-size>
<time-to-live-seconds>864000</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
@@ -304,7 +304,7 @@
</map>
<map name="Last Activity Cache">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">10000</max-size>
<max-size policy="per_partition">10000</max-size>
<time-to-live-seconds>21600</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
@@ -315,37 +315,37 @@
</map>
<map name="Multicast Service">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">10000</max-size>
<max-size policy="per_partition">10000</max-size>
<time-to-live-seconds>86400</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
<map name="Offline Message Size">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<time-to-live-seconds>43200</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
<map name="Offline Presence Cache">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<time-to-live-seconds>21600</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
<map name="Privacy Lists">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<time-to-live-seconds>21600</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
<map name="Remote Users Existence">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<time-to-live-seconds>600</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
<map name="Remote Server Configurations">
<backup-count>1</backup-count>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<time-to-live-seconds>1800</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
@@ -355,28 +355,28 @@
<map name="Group Metadata Cache">
<backup-count>1</backup-count>
<read-backup-data>true</read-backup-data>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<max-idle-seconds>3600</max-idle-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
<map name="Group">
<backup-count>1</backup-count>
<read-backup-data>true</read-backup-data>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<max-idle-seconds>3600</max-idle-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
<map name="Roster">
<backup-count>1</backup-count>
<read-backup-data>true</read-backup-data>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<max-idle-seconds>3600</max-idle-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
<map name="User">
<backup-count>1</backup-count>
<read-backup-data>true</read-backup-data>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<max-idle-seconds>3600</max-idle-seconds>
<eviction-policy>LRU</eviction-policy>
</map>
@@ -386,7 +386,7 @@
<map name="VCard">
<backup-count>1</backup-count>
<read-backup-data>true</read-backup-data>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<time-to-live-seconds>21600</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
<near-cache>
@@ -399,7 +399,7 @@
<map name="Published Items">
<backup-count>1</backup-count>
<read-backup-data>true</read-backup-data>
<max-size policy="cluster_wide_map_size">100000</max-size>
<max-size policy="per_partition">100000</max-size>
<time-to-live-seconds>900</time-to-live-seconds>
<eviction-policy>LRU</eviction-policy>
<near-cache>
@@ -5,7 +5,7 @@
<name>${plugin.name}</name>
<description>${plugin.description}</description>
<author>Tom Evans</author>
<version>1.1.0</version>
<date>09/13/2013</date>
<version>1.2.0</version>
<date>02/10/2014</date>
<minServerVersion>3.9.0</minServerVersion>
</plugin>
@@ -61,11 +61,11 @@ servers together in a cluster. By running Openfire as a cluster, you can
distribute the connection load among several servers, while also providing
failover in the event that one of your servers fails. This plugin is a
drop-in replacement for the original Openfire clustering plugin, using the
open source <a href="http://www.hazelcast.com">Hazelcast</a> data distribution
open source <a href="http://www.hazelcast.org">Hazelcast</a> data distribution
framework in lieu of an expensive proprietary third-party product.
</p>
<p>
The current Hazelcast release is version 2.5.1.
The current Hazelcast release is version 3.1.5.
</p>
<h2>Installation</h2>
<p>
@@ -186,7 +186,7 @@ Hazelcast JMX docs</a> for additional information.</li>
</ol>
</p>
<p>
The Hazelcast plugin uses the <a href="http://www.hazelcast.com/docs/2.5/manual/single_html/#Config">
The Hazelcast plugin uses the <a href="http://www.hazelcast.org/docs/3.1/manual/single_html/#Config">
XML configuration builder</a> to initialize the cluster from the XML file described above.
By default the cluster members will attempt to discover each other via multicast at the
following location:
@@ -211,7 +211,7 @@ following alternative:
&lt;/join&gt;
...
</pre>
Please refer to the <a href="http://www.hazelcast.com/docs/2.5/manual/single_html/">
Please refer to the <a href="http://www.hazelcast.org/docs/3.1/manual/single_html/">
Hazelcast reference manual</a> for more information.
</p>
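<p>
As a minimal sketch of the Hazelcast 3.x startup path described above (the
resource name shown here is illustrative), a cluster member can be initialized
from an XML descriptor on the classpath as follows:
</p>
<pre>
import com.hazelcast.config.ClasspathXmlConfig;
import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

// Load the XML configuration from the classpath and join (or form) the cluster
Config config = new ClasspathXmlConfig("hazelcast-cache-config.xml");
HazelcastInstance hazelcast = Hazelcast.newHazelcastInstance(config);
System.out.println("Cluster members: " + hazelcast.getCluster().getMembers().size());
</pre>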
</body>
@@ -20,19 +20,26 @@
package com.jivesoftware.util.cache;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.Externalizable;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectStreamClass;
import java.io.Serializable;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.jivesoftware.util.cache.ExternalizableUtilStrategy;
import com.hazelcast.nio.SerializationHelper;
import com.hazelcast.core.HazelcastInstance;
/**
* Serialization strategy that uses Hazelcast as its underlying mechanism.
@@ -51,7 +58,7 @@ public class ClusterExternalizableUtil implements ExternalizableUtilStrategy {
* @throws java.io.IOException if an error occurs.
*/
public void writeStringMap(DataOutput out, Map<String, String> stringMap) throws IOException {
SerializationHelper.writeObject(out, stringMap);
writeObject(out, stringMap);
}
/**
@@ -63,7 +70,7 @@ public class ClusterExternalizableUtil implements ExternalizableUtilStrategy {
* @throws IOException if an error occurs.
*/
public Map<String, String> readStringMap(DataInput in) throws IOException {
return (Map<String, String>) SerializationHelper.readObject(in);
return (Map<String, String>) readObject(in);
}
/**
@@ -75,7 +82,7 @@ public class ClusterExternalizableUtil implements ExternalizableUtilStrategy {
* @throws java.io.IOException if an error occurs.
*/
public void writeStringsMap(DataOutput out, Map<String, Set<String>> map) throws IOException {
SerializationHelper.writeObject(out, map);
writeObject(out, map);
}
/**
@@ -87,7 +94,7 @@ public class ClusterExternalizableUtil implements ExternalizableUtilStrategy {
* @throws IOException if an error occurs.
*/
public int readStringsMap(DataInput in, Map<String, Set<String>> map) throws IOException {
Map<String, Set<String>> result = (Map<String, Set<String>>) SerializationHelper.readObject(in);
Map<String, Set<String>> result = (Map<String, Set<String>>) readObject(in);
if (result == null) return 0;
map.putAll(result);
return result.size();
@@ -102,7 +109,7 @@ public class ClusterExternalizableUtil implements ExternalizableUtilStrategy {
* @throws IOException if an error occurs.
*/
public void writeLongIntMap(DataOutput out, Map<Long, Integer> map) throws IOException {
SerializationHelper.writeObject(out, map);
writeObject(out, map);
}
/**
@@ -114,7 +121,7 @@ public class ClusterExternalizableUtil implements ExternalizableUtilStrategy {
* @throws IOException if an error occurs.
*/
public Map<Long, Integer> readLongIntMap(DataInput in) throws IOException {
return (Map<Long, Integer>) SerializationHelper.readObject(in);
return (Map<Long, Integer>) readObject(in);
}
/**
@@ -126,7 +133,7 @@ public class ClusterExternalizableUtil implements ExternalizableUtilStrategy {
* @throws IOException if an error occurs.
*/
public void writeStringList(DataOutput out, List<String> stringList) throws IOException {
SerializationHelper.writeObject(out, stringList);
writeObject(out, stringList);
}
/**
@@ -138,7 +145,7 @@ public class ClusterExternalizableUtil implements ExternalizableUtilStrategy {
* @throws IOException if an error occurs.
*/
public List<String> readStringList(DataInput in) throws IOException {
return (List<String>) SerializationHelper.readObject(in);
return (List<String>) readObject(in);
}
/**
@@ -150,7 +157,7 @@ public class ClusterExternalizableUtil implements ExternalizableUtilStrategy {
* @throws IOException if an error occurs.
*/
public void writeLongArray(DataOutput out, long [] array) throws IOException {
SerializationHelper.writeObject(out, array);
writeObject(out, array);
}
/**
@@ -162,112 +169,301 @@ public class ClusterExternalizableUtil implements ExternalizableUtilStrategy {
* @throws IOException if an error occurs.
*/
public long [] readLongArray(DataInput in) throws IOException {
return (long[]) SerializationHelper.readObject(in);
return (long []) readObject(in);
}
public void writeLong(DataOutput out, long value) throws IOException {
SerializationHelper.writeObject(out, value);
writeObject(out, value);
}
public long readLong(DataInput in) throws IOException {
return (Long) SerializationHelper.readObject(in);
return (Long) readObject(in);
}
public void writeByteArray(DataOutput out, byte[] value) throws IOException {
SerializationHelper.writeObject(out, value);
writeObject(out, value);
}
public byte[] readByteArray(DataInput in) throws IOException {
return (byte[]) SerializationHelper.readObject(in);
return (byte []) readObject(in);
}
public void writeInt(DataOutput out, int value) throws IOException {
SerializationHelper.writeObject(out, value);
writeObject(out, value);
}
public int readInt(DataInput in) throws IOException {
return (Integer) SerializationHelper.readObject(in);
return (Integer) readObject(in);
}
public void writeBoolean(DataOutput out, boolean value) throws IOException {
SerializationHelper.writeObject(out, value);
writeObject(out, value);
}
public boolean readBoolean(DataInput in) throws IOException {
return (Boolean) SerializationHelper.readObject(in);
return (Boolean) readObject(in);
}
public void writeSerializable(DataOutput out, Serializable value) throws IOException {
SerializationHelper.writeObject(out, value);
writeObject(out, value);
}
public Serializable readSerializable(DataInput in) throws IOException {
return (Serializable) SerializationHelper.readObject(in);
return (Serializable) readObject(in);
}
public void writeSafeUTF(DataOutput out, String value) throws IOException {
SerializationHelper.writeObject(out, value);
writeObject(out, value);
}
public String readSafeUTF(DataInput in) throws IOException {
return (String) SerializationHelper.readObject(in);
return (String) readObject(in);
}
public void writeExternalizableCollection(DataOutput out, Collection<? extends Externalizable> value)
throws IOException {
SerializationHelper.writeObject(out, value);
/**
* Writes a collection of Externalizable objects. The collection passed as a parameter
* must not be <tt>null</tt>.
*
* @param out the output stream.
* @param value the collection of Externalizable objects. This value must not be null.
* @throws IOException if an error occurs.
*/
public void writeExternalizableCollection(DataOutput out, Collection<? extends Externalizable> value) throws IOException {
writeObject(out, value);
}
public void writeSerializableCollection(DataOutput out, Collection<? extends Serializable> value)
throws IOException {
SerializationHelper.writeObject(out, value);
/**
* Writes a collection of Serializable objects. The collection passed as a parameter
* must not be <tt>null</tt>.
*
* @param out the output stream.
* @param value the collection of Serializable objects. This value must not be null.
* @throws IOException if an error occurs.
*/
public void writeSerializableCollection(DataOutput out, Collection<? extends Serializable> value) throws IOException {
writeObject(out, value);
}
public int readExternalizableCollection(DataInput in, Collection<? extends Externalizable> value,
ClassLoader loader) throws IOException {
Collection<Externalizable> result = (Collection<Externalizable>) SerializationHelper.readObject(in);
/**
* Reads a collection of Externalizable objects and adds them to the collection passed as a parameter. The
* collection passed as a parameter must not be <tt>null</tt>.
*
* @param in the input stream.
* @param value the collection of Externalizable objects. This value must not be null.
* @param loader class loader to use to build elements inside of the serialized collection.
* @throws IOException if an error occurs.
* @return the number of elements added to the collection.
*/
public int readExternalizableCollection(DataInput in, Collection<? extends Externalizable> value, ClassLoader loader) throws IOException {
Collection<Externalizable> result = (Collection<Externalizable>) readObject(in);
if (result == null) return 0;
((Collection<Externalizable>)value).addAll(result);
return result.size();
}
public int readSerializableCollection(DataInput in, Collection<? extends Serializable> value,
ClassLoader loader) throws IOException {
Collection<Serializable> result = (Collection<Serializable>) SerializationHelper.readObject(in);
/**
* Reads a collection of Serializable objects and adds them to the collection passed as a parameter. The
* collection passed as a parameter must not be <tt>null</tt>.
*
* @param in the input stream.
* @param value the collection of Serializable objects. This value must not be null.
* @param loader class loader to use to build elements inside of the serialized collection.
* @throws IOException if an error occurs.
* @return the number of elements added to the collection.
*/
public int readSerializableCollection(DataInput in, Collection<? extends Serializable> value, ClassLoader loader) throws IOException {
Collection<Serializable> result = (Collection<Serializable>) readObject(in);
if (result == null) return 0;
((Collection<Serializable>)value).addAll(result);
return result.size();
}
}
/**
* Writes a Map of String keys and Externalizable values. This method handles the
* case when the Map is <tt>null</tt>.
*
* @param out the output stream.
* @param map the Map of String key and Externalizable value pairs.
* @throws java.io.IOException if an error occurs.
*/
public void writeExternalizableMap(DataOutput out, Map<String, ? extends Externalizable> map) throws IOException {
SerializationHelper.writeObject(out, map);
writeObject(out, map);
}
/**
* Writes a Map of Serializable key and value pairs. This method handles the
* case when the Map is <tt>null</tt>.
*
* @param out the output stream.
* @param map the Map of Serializable key and value pairs.
* @throws java.io.IOException if an error occurs.
*/
public void writeSerializableMap(DataOutput out, Map<? extends Serializable, ? extends Serializable> map) throws IOException {
SerializationHelper.writeObject(out, map);
writeObject(out, map);
}
/**
* Reads a Map of String keys and Externalizable values into the Map passed
* as a parameter. Returns 0 if the Map written to the stream was <tt>null</tt>.
*
* @param in the input stream.
* @param map a Map of String key and Externalizable value pairs.
* @param loader class loader to use to build elements inside of the serialized collection.
* @throws IOException if an error occurs.
* @return the number of entries added to the map.
*/
public int readExternalizableMap(DataInput in, Map<String, ? extends Externalizable> map, ClassLoader loader) throws IOException {
Map<String, Externalizable> result = (Map<String, Externalizable>) SerializationHelper.readObject(in);
Map<String, Externalizable> result = (Map<String, Externalizable>) readObject(in);
if (result == null) return 0;
((Map<String, Externalizable>)map).putAll(result);
return result.size();
}
/**
* Reads a Map of Serializable keys and values into the Map passed as a
* parameter. Returns 0 if the Map written to the stream was <tt>null</tt>.
*
* @param in the input stream.
* @param map a Map of Serializable key and value pairs.
* @param loader class loader to use to build elements inside of the serialized collection.
* @throws IOException if an error occurs.
* @return the number of entries added to the map.
*/
public int readSerializableMap(DataInput in, Map<? extends Serializable, ? extends Serializable> map, ClassLoader loader) throws IOException {
Map<String, Serializable> result = (Map<String, Serializable>) SerializationHelper.readObject(in);
Map<String, Serializable> result = (Map<String, Serializable>) readObject(in);
if (result == null) return 0;
((Map<String, Serializable>)map).putAll(result);
return result.size();
}
public void writeStrings(DataOutput out, Collection<String> collection) throws IOException {
SerializationHelper.writeObject(out, collection);
writeObject(out, collection);
}
public int readStrings(DataInput in, Collection<String> collection) throws IOException {
Collection<String> result = (Collection<String>) SerializationHelper.readObject(in);
Collection<String> result = (Collection<String>) readObject(in);
if (result == null) return 0;
collection.addAll(result);
return result.size();
}
// serialization helpers
public static void writeObject(DataOutput out, Object obj) throws IOException {
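// Write a one-byte type tag followed by the value; common primitives get
// compact encodings, and tag 9 falls back to standard Java serialization.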
if (obj == null) {
out.writeByte(0);
} else if (obj instanceof Long) {
out.writeByte(1);
out.writeLong((Long) obj);
} else if (obj instanceof Integer) {
out.writeByte(2);
out.writeInt((Integer) obj);
} else if (obj instanceof String) {
out.writeByte(3);
out.writeUTF((String) obj);
} else if (obj instanceof Double) {
out.writeByte(4);
out.writeDouble((Double) obj);
} else if (obj instanceof Float) {
out.writeByte(5);
out.writeFloat((Float) obj);
} else if (obj instanceof Boolean) {
out.writeByte(6);
out.writeBoolean((Boolean) obj);
} else if (obj instanceof Date) {
out.writeByte(8);
out.writeLong(((Date) obj).getTime());
} else {
out.writeByte(9);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(bos);
oos.writeObject(obj);
oos.close();
byte[] buf = bos.toByteArray();
out.writeInt(buf.length);
out.write(buf);
}
}
public static Object readObject(DataInput in) throws IOException {
byte type = in.readByte();
if (type == 0) {
return null;
} else if (type == 1) {
return in.readLong();
} else if (type == 2) {
return in.readInt();
} else if (type == 3) {
return in.readUTF();
} else if (type == 4) {
return in.readDouble();
} else if (type == 5) {
return in.readFloat();
} else if (type == 6) {
return in.readBoolean();
} else if (type == 8) {
return new Date(in.readLong());
} else if (type == 9) {
int len = in.readInt();
byte[] buf = new byte[len];
in.readFully(buf);
ObjectInputStream oin = newObjectInputStream(new ByteArrayInputStream(buf));
try {
return oin.readObject();
} catch (ClassNotFoundException e) {
throw new IOException(e);
} finally {
oin.close();
}
} else {
throw new IOException("Unknown object type=" + type);
}
}
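// The stream below resolves classes via loadClass(), which prefers the
// Hazelcast classloader for com.hazelcast.* types and otherwise the thread
// context classloader, so payloads from other cluster members deserialize
// correctly inside the plugin.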
public static ObjectInputStream newObjectInputStream(final InputStream in) throws IOException {
return new ObjectInputStream(in) {
@Override
protected Class<?> resolveClass(final ObjectStreamClass desc) throws ClassNotFoundException {
return loadClass(desc.getName());
}
};
}
public static Class<?> loadClass(final String className) throws ClassNotFoundException {
return loadClass(null, className);
}
public static Class<?> loadClass(final ClassLoader classLoader, final String className) throws ClassNotFoundException {
if (className == null) {
throw new IllegalArgumentException("ClassName cannot be null!");
}
if (className.length() <= MAX_PRIM_CLASSNAME_LENGTH && Character.isLowerCase(className.charAt(0))) {
for (int i = 0; i < PRIMITIVE_CLASSES_ARRAY.length; i++) {
if (className.equals(PRIMITIVE_CLASSES_ARRAY[i].getName())) {
return PRIMITIVE_CLASSES_ARRAY[i];
}
}
}
ClassLoader theClassLoader = classLoader;
if (className.startsWith("com.hazelcast.") || className.startsWith("[Lcom.hazelcast.")) {
theClassLoader = HazelcastInstance.class.getClassLoader();
}
if (theClassLoader == null) {
theClassLoader = Thread.currentThread().getContextClassLoader();
}
if (theClassLoader != null) {
if (className.startsWith("[")) {
return Class.forName(className, true, theClassLoader);
} else {
return theClassLoader.loadClass(className);
}
}
return Class.forName(className);
}
private static final Class[] PRIMITIVE_CLASSES_ARRAY = {int.class, long.class, boolean.class, byte.class,
float.class, double.class, char.class, short.class, void.class};
private static final int MAX_PRIM_CLASSNAME_LENGTH = 7; // boolean.class.getName().length();
}
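The helpers above are symmetric over any DataOutput/DataInput pair. A minimal
round-trip sketch (this demo class is illustrative and not part of the plugin):

import com.jivesoftware.util.cache.ClusterExternalizableUtil;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class SerializationRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        ClusterExternalizableUtil.writeObject(out, "hello"); // tag 3 + UTF string
        ClusterExternalizableUtil.writeObject(out, 42L);     // tag 1 + raw long
        ClusterExternalizableUtil.writeObject(out, null);    // tag 0 only

        DataInputStream in = new DataInputStream(
                new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(ClusterExternalizableUtil.readObject(in)); // hello
        System.out.println(ClusterExternalizableUtil.readObject(in)); // 42
        System.out.println(ClusterExternalizableUtil.readObject(in)); // null
    }
}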
@@ -54,6 +54,7 @@ import org.xmpp.packet.Presence;
import com.hazelcast.core.Cluster;
import com.hazelcast.core.EntryEvent;
import com.hazelcast.core.EntryEventType;
import com.hazelcast.core.EntryListener;
import com.hazelcast.core.LifecycleEvent;
import com.hazelcast.core.LifecycleEvent.LifecycleState;
@@ -138,18 +139,6 @@ public class ClusterListener implements MembershipListener, LifecycleListener {
directedPresencesCache = CacheFactory.createCache(PresenceUpdateHandler.PRESENCE_CACHE_NAME);
addEntryListener(C2SCache, new CacheListener(this, C2SCache.getName()));
addEntryListener(anonymousC2SCache, new CacheListener(this, anonymousC2SCache.getName()));
addEntryListener(S2SCache, new CacheListener(this, S2SCache.getName()));
addEntryListener(componentsCache, new ComponentCacheListener());
addEntryListener(sessionInfoCache, new CacheListener(this, sessionInfoCache.getName()));
addEntryListener(componentSessionsCache, new CacheListener(this, componentSessionsCache.getName()));
addEntryListener(multiplexerSessionsCache, new CacheListener(this, multiplexerSessionsCache.getName()));
addEntryListener(incomingServerSessionsCache, new CacheListener(this, incomingServerSessionsCache.getName()));
addEntryListener(directedPresencesCache, new DirectedPresenceListener());
joinCluster();
}
@@ -173,7 +162,7 @@ public class ClusterListener implements MembershipListener, LifecycleListener {
ClusteredCache clusteredCache = (ClusteredCache) wrapped;
for (Map.Entry entry : (Set<Map.Entry>) cache.entrySet()) {
EntryEvent event = new EntryEvent(clusteredCache.map.getName(), cluster.getLocalMember(),
EntryEvent.TYPE_ADDED, entry.getKey(), null, entry.getValue());
EntryEventType.ADDED.getType(), entry.getKey(), null, entry.getValue());
EntryListener.entryAdded(event);
}
}
@@ -555,6 +544,20 @@ public class ClusterListener implements MembershipListener, LifecycleListener {
if (!isDone()) { // already joined
return;
}
// Trigger events
ClusterManager.fireJoinedCluster(false);
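// OF-699: register the cache listeners only after this node has joined the
// cluster; registering them at construction time (as before) could deliver
// cache events before initialization had completed.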
addEntryListener(C2SCache, new CacheListener(this, C2SCache.getName()));
addEntryListener(anonymousC2SCache, new CacheListener(this, anonymousC2SCache.getName()));
addEntryListener(S2SCache, new CacheListener(this, S2SCache.getName()));
addEntryListener(componentsCache, new ComponentCacheListener());
addEntryListener(sessionInfoCache, new CacheListener(this, sessionInfoCache.getName()));
addEntryListener(componentSessionsCache, new CacheListener(this, componentSessionsCache.getName()));
addEntryListener(multiplexerSessionsCache, new CacheListener(this, multiplexerSessionsCache.getName()));
addEntryListener(incomingServerSessionsCache, new CacheListener(this, incomingServerSessionsCache.getName()));
addEntryListener(directedPresencesCache, new DirectedPresenceListener());
// Simulate insert events of existing cache content
simulateCacheInserts(C2SCache);
simulateCacheInserts(anonymousC2SCache);
@@ -566,8 +569,7 @@
simulateCacheInserts(incomingServerSessionsCache);
simulateCacheInserts(directedPresencesCache);
// Trigger events
ClusterManager.fireJoinedCluster(false);
if (CacheFactory.isSeniorClusterMember()) {
seniorClusterMember = true;
ClusterManager.fireMarkedAsSeniorClusterMember();
@@ -19,6 +19,7 @@
package com.jivesoftware.util.cache;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
@@ -39,6 +40,8 @@ import com.hazelcast.monitor.LocalMapStats;
public class ClusteredCache implements Cache {
private static Logger logger = LoggerFactory.getLogger(ClusteredCache.class);
private final Map<EntryListener, String> registrations = new HashMap<EntryListener, String>();
/**
* The map is used for distributed operations such as get, put, etc.
@@ -59,11 +62,14 @@ public class ClusteredCache implements Cache {
}
public void addEntryListener(EntryListener listener, boolean includeValue) {
map.addEntryListener(listener, includeValue);
registrations.put(listener, map.addEntryListener(listener, includeValue));
}
public void removeEntryListener(EntryListener listener) {
map.removeEntryListener(listener);
String registrationId = registrations.get(listener);
if (registrationId != null) {
map.removeEntryListener(registrationId);
}
}
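// Hazelcast 3.x deregisters listeners by the String registration id returned
// from IMap.addEntryListener, so the registrations map above bridges the
// listener-reference API to the id-based one.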
// Cache Interface
@@ -168,7 +174,12 @@
} else if (timeout == 0) {
result = map.tryLock(key);
} else {
result = map.tryLock(key, timeout, TimeUnit.MILLISECONDS);
try {
result = map.tryLock(key, timeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
logger.error("Failed to get cluster lock", e);
result = false;
}
}
return result;
}
......
@@ -20,6 +20,7 @@
package com.jivesoftware.util.cache;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
@@ -29,6 +30,7 @@ import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.Condition;
@@ -52,11 +54,9 @@ import org.slf4j.LoggerFactory;
import com.hazelcast.config.ClasspathXmlConfig;
import com.hazelcast.config.Config;
import com.hazelcast.core.Cluster;
import com.hazelcast.core.DistributedTask;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.Member;
import com.hazelcast.core.MultiTask;
import com.jivesoftware.openfire.session.RemoteSessionLocator;
import com.jivesoftware.util.cluster.ClusterPacketRouter;
import com.jivesoftware.util.cluster.HazelcastClusterNodeInfo;
@@ -69,6 +69,8 @@ import com.jivesoftware.util.cluster.HazelcastClusterNodeInfo;
*/
public class ClusteredCacheFactory implements CacheFactoryStrategy {
public static final String HAZELCAST_EXECUTOR_SERVICE_NAME =
JiveGlobals.getProperty("hazelcast.executor.service.name", "openfire::cluster::executor");
private static final long MAX_CLUSTER_EXECUTION_TIME =
JiveGlobals.getLongProperty("hazelcast.max.execution.seconds", 30);
private static final long CLUSTER_STARTUP_RETRY_TIME =
@@ -276,8 +278,8 @@ public class ClusteredCacheFactory implements CacheFactoryStrategy {
if (members.size() > 0) {
// Asynchronously execute the task on the other cluster members
logger.debug("Executing asynchronous MultiTask: " + task.getClass().getName());
hazelcast.getExecutorService().execute(
new MultiTask<Object>(new CallableTask<Object>(task), members));
hazelcast.getExecutorService(HAZELCAST_EXECUTOR_SERVICE_NAME).submitToMembers(
new CallableTask<Object>(task), members);
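// Fire-and-forget: the Map of Futures returned by submitToMembers is
// intentionally ignored for asynchronous tasks.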
} else {
logger.warn("No cluster members selected for cluster task " + task.getClass().getName());
}
@@ -295,8 +297,8 @@ public class ClusteredCacheFactory implements CacheFactoryStrategy {
if (member != null) {
// Asynchronously execute the task on the target member
logger.debug("Executing asynchronous DistributedTask: " + task.getClass().getName());
hazelcast.getExecutorService().execute(
new DistributedTask<Object>(new CallableTask<Object>(task), member));
hazelcast.getExecutorService(HAZELCAST_EXECUTOR_SERVICE_NAME).submitToMember(
new CallableTask<Object>(task), member);
return true;
} else {
logger.warn("Requested node " + StringUtils.getString(nodeID) + " not found in cluster");
@@ -310,8 +312,7 @@
* (seconds) until the task is run on all members.
*/
public Collection<Object> doSynchronousClusterTask(ClusterTask task, boolean includeLocalMember) {
Collection<Object> result = Collections.emptyList();
if (cluster == null) { return result; }
if (cluster == null) { return Collections.emptyList(); }
Set<Member> members = new HashSet<Member>();
Member current = cluster.getLocalMember();
for(Member member : cluster.getMembers()) {
@@ -319,15 +320,19 @@
members.add(member);
}
}
Collection<Object> result = new ArrayList<Object>();
if (members.size() > 0) {
// Asynchronously execute the task on the other cluster members
MultiTask<Object> multiTask = new MultiTask<Object>(
new CallableTask<Object>(task), members);
try {
logger.debug("Executing MultiTask: " + task.getClass().getName());
hazelcast.getExecutorService().execute(multiTask);
result = multiTask.get(MAX_CLUSTER_EXECUTION_TIME,TimeUnit.SECONDS);
logger.debug("MultiTask result: " + (result == null ? "null" : result.size()));
Map<Member, Future<Object>> futures = hazelcast.getExecutorService(HAZELCAST_EXECUTOR_SERVICE_NAME)
.submitToMembers(new CallableTask<Object>(task), members);
long nanosLeft = TimeUnit.SECONDS.toNanos(MAX_CLUSTER_EXECUTION_TIME);
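// Share one MAX_CLUSTER_EXECUTION_TIME budget across all futures; each
// get() consumes part of the budget and the remainder carries forward.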
for (Future<Object> future : futures.values()) {
long start = System.nanoTime();
result.add(future.get(nanosLeft, TimeUnit.NANOSECONDS));
nanosLeft -= (System.nanoTime() - start);
}
} catch (TimeoutException te) {
logger.error("Failed to execute cluster task within " + MAX_CLUSTER_EXECUTION_TIME + " seconds", te);
} catch (Exception e) {
@@ -351,12 +356,11 @@
// Check that the requested member was found
if (member != null) {
// Asynchronously execute the task on the target member
DistributedTask<Object> distributedTask = new DistributedTask<Object>(
new CallableTask<Object>(task), member);
logger.debug("Executing DistributedTask: " + task.getClass().getName());
hazelcast.getExecutorService().execute(distributedTask);
try {
result = distributedTask.get(MAX_CLUSTER_EXECUTION_TIME, TimeUnit.SECONDS);
Future<Object> future = hazelcast.getExecutorService(HAZELCAST_EXECUTOR_SERVICE_NAME)
.submitToMember(new CallableTask<Object>(task), member);
result = future.get(MAX_CLUSTER_EXECUTION_TIME, TimeUnit.SECONDS);
logger.debug("DistributedTask result: " + (result == null ? "null" : result));
} catch (TimeoutException te) {
logger.error("Failed to execute cluster task within " + MAX_CLUSTER_EXECUTION_TIME + " seconds", te);