Skip to content
Browse files

Merge pull request #21 from GregDThomas/dynamic-cache-expiry

Fix issue #5; allow code to create Caches on the fly
  • Loading branch information...
GregDThomas committed Mar 18, 2019
2 parents 6f3e135 + 1b4fe64 commit 127f2be515ccc6f233fefee957cfa7400495cf91
@@ -47,6 +47,7 @@ <h1>

<p><b>2.4.1</b> -- tbd</p>
<li>[<a href=''>Issue #5</a>] - Dynamically created ClusteredCache entries not expiring</li>
<li>[<a href=''>Issue #13</a>] - Less dramatic log messages when running a one-node cluster</li>
<li>[<a href=''>Issue #16</a>] - Expose host name of cluster members on the admin UI</li>
@@ -260,5 +260,45 @@ <h3>A Word About Garbage Collection</h3>
<p>This GC configuration will also emit helpful GC diagnostic information to the console to aid further
tuning and troubleshooting as appropriate for your deployment.

<h2>Configuring Cache expiry times and sizes</h2>
<h3>Core Openfire caches</h3>
When clustering is enabled, the only way to change the size of standard Openfire caches is by
editing <code>plugins/hazelcast/classes/hazelcast-cache-config.xml</code> on every node in the cluster, shutting down
every node in the cluster, and then restarting the nodes. Dynamic configuration is not currently possible, and using
different configurations on different nodes is liable to lead to odd behaviour.

<p>This is different to non-clustered caches, where it is sufficient to edit the
<code>cache.[cache name].maxLifetime</code> and <code>cache.[cache name].size</code> System Properties</p>

<h3>Plugin defined caches</h3>
A plugin can create its own Cache without the need to edit any configuration files. For example:

final String cacheName = "my-test-cache";
CacheFactory.setMaxSizeProperty(cacheName, maxCacheSize);
CacheFactory.setMaxLifetimeProperty(cacheName, maxLifetime);

final Cache&lt;JID, Instant&gt; cache = CacheFactory.createCache(cacheName);

<code>CacheFactory.setMaxSizeProperty</code>/<code>CacheFactory.setMaxLifetimeProperty</code> will set Openfire
System Properties that are used to configure the Cache when it is created.
If no Openfire System Properties are set, the default expiry values are used (unlimited size, and a lifetime of six hours).
The first node in the cluster to call <code>CacheFactory.createCache</code> will use the configured expiry
values, subsequent calls to <code>CacheFactory.createCache</code> will simply retrieve the existing Cache
with the previously configured expiry values. It is not possible to change the expiry values after the Cache
has been created.

@@ -37,15 +37,14 @@
public class ClusteredCache<K extends Serializable, V extends Serializable> implements Cache<K, V> {

private static Logger logger = LoggerFactory.getLogger(ClusteredCache.class);
private static final Logger logger = LoggerFactory.getLogger(ClusteredCache.class);

private final Set<String> listeners = ConcurrentHashMap.newKeySet();

* The map is used for distributed operations such as get, put, etc.
final IMap<K, V> map;
private final int hazelcastLifetimeInSeconds;
private String name;
private long numberOfGets = 0;

@@ -54,15 +53,13 @@
/**
 * Creates a new cache wrapping the given Hazelcast distributed map.
 * Entry lifetimes are governed by the map's configuration, so no explicit
 * lifetime parameter is needed here.
 *
 * @param name a name for the cache, which should be unique per vm.
 * @param cache the cache implementation
 */
protected ClusteredCache(final String name, final IMap<K, V> cache) {
    this.map = cache; = name;
}

void addEntryListener(MapListener listener) {
void addEntryListener(final MapListener listener) {
listeners.add(map.addEntryListener(listener, false));

@@ -72,24 +69,24 @@ public String getName() {

/**
 * Sets the name of this cache.
 *
 * @param name the new cache name
 */
public void setName(final String name) { = name;
}

public V put(K key, V object) {
public V put(final K key, final V object) {
if (object == null) { return null; }
return map.put(key, object, hazelcastLifetimeInSeconds, TimeUnit.SECONDS);
return map.put(key, object);

public V get(Object key) {
public V get(final Object key) {
return map.get(key);

public V remove(Object key) {
public V remove(final Object key) {
return map.remove(key);

@@ -100,17 +97,17 @@ public void clear() {

public int size() {
LocalMapStats stats = map.getLocalMapStats();
final LocalMapStats stats = map.getLocalMapStats();
return (int) (stats.getOwnedEntryCount() + stats.getBackupEntryCount());

public boolean containsKey(Object key) {
public boolean containsKey(final Object key) {
return map.containsKey(key);

public boolean containsValue(Object value) {
public boolean containsValue(final Object value) {
return map.containsValue(value);

@@ -130,7 +127,7 @@ public boolean isEmpty() {

public void putAll(Map<? extends K, ? extends V> entries) {
public void putAll(final Map<? extends K, ? extends V> entries) {

@@ -146,13 +143,13 @@ public long getCacheHits() {

public long getCacheMisses() {
long hits = map.getLocalMapStats().getHits();
final long hits = map.getLocalMapStats().getHits();
return numberOfGets > hits ? numberOfGets - hits : 0;

public int getCacheSize() {
LocalMapStats stats = map.getLocalMapStats();
final LocalMapStats stats = map.getLocalMapStats();
return (int) (stats.getOwnedEntryMemoryCost() + stats.getBackupEntryMemoryCost());

@@ -162,7 +159,7 @@ public long getMaxCacheSize() {

public void setMaxCacheSize(int maxSize) {
public void setMaxCacheSize(final int maxSize) {
CacheFactory.setMaxSizeProperty(getName(), maxSize);

@@ -172,7 +169,7 @@ public long getMaxLifetime() {

public void setMaxLifetime(long maxLifetime) {
public void setMaxLifetime(final long maxLifetime) {
CacheFactory.setMaxLifetimeProperty(getName(), maxLifetime);

@@ -181,7 +178,7 @@ void destroy() {

boolean lock(K key, long timeout) {
boolean lock(final K key, final long timeout) {
boolean result = true;
if (timeout < 0) {
@@ -190,18 +187,18 @@ boolean lock(K key, long timeout) {
} else {
try {
result = map.tryLock(key, timeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
} catch (final InterruptedException e) {
logger.error("Failed to get cluster lock", e);
result = false;
return result;

void unlock(K key) {
void unlock(final K key) {
try {
} catch (IllegalMonitorStateException e) {
} catch (final IllegalMonitorStateException e) {
logger.error("Failed to release cluster lock", e);
@@ -257,12 +257,20 @@ public Cache createCache(final String name) {
// Determine the max cache size. Note that in Hazelcast the max cache size must be positive
final long openfireMaxCacheSize = CacheFactory.getMaxCacheSize(name);
final int hazelcastMaxCacheSize = openfireMaxCacheSize < 0 ? Integer.MAX_VALUE : (int) openfireMaxCacheSize;
final MapConfig mapConfig = hazelcast.getConfig().getMapConfig(name);
mapConfig.setMaxSizeConfig(new MaxSizeConfig(hazelcastMaxCacheSize, MaxSizeConfig.MaxSizePolicy.USED_HEAP_SIZE));
// It's only possible to create a dynamic config if a static one doesn't already exist
final MapConfig staticConfig = hazelcast.getConfig().getMapConfigOrNull(name);
if (staticConfig == null) {
final MapConfig dynamicConfig = new MapConfig(name);
dynamicConfig.setMaxSizeConfig(new MaxSizeConfig(hazelcastMaxCacheSize, MaxSizeConfig.MaxSizePolicy.USED_HEAP_SIZE));
logger.debug("Creating dynamic map config for cache={}, dynamicConfig={}", name, dynamicConfig);
} else {
logger.debug("Static configuration already exists for cache={}, staticConfig={}", name, staticConfig);
// TODO: Better genericize this method in CacheFactoryStrategy so we can stop suppressing this warning
final ClusteredCache clusteredCache = new ClusteredCache(name, hazelcast.getMap(name), hazelcastLifetimeInSeconds);
final ClusteredCache clusteredCache = new ClusteredCache(name, hazelcast.getMap(name));
return clusteredCache;

0 comments on commit 127f2be

Please sign in to comment.
You can’t perform that action at this time.