
Merge pull request #21 from GregDThomas/dynamic-cache-expiry
Fix issue #5; allow code to create Caches on the fly
GregDThomas committed Mar 18, 2019
2 parents 6f3e135 + 1b4fe64 commit 127f2be
Showing 4 changed files with 73 additions and 27 deletions.
1 change: 1 addition & 0 deletions changelog.html
@@ -47,6 +47,7 @@ <h1>

<p><b>2.4.1</b> -- tbd</p>
<ul>
<li>[<a href='https://github.com/igniterealtime/openfire-hazelcast-plugin/issues/5'>Issue #5</a>] - Dynamically created ClusteredCache entries not expiring</li>
<li>[<a href='https://github.com/igniterealtime/openfire-hazelcast-plugin/issues/13'>Issue #13</a>] - Less dramatic log messages when running a one-node cluster</li>
<li>[<a href='https://github.com/igniterealtime/openfire-hazelcast-plugin/issues/16'>Issue #16</a>] - Expose host name of cluster members on the admin UI</li>
</ul>
40 changes: 40 additions & 0 deletions readme.html
@@ -260,5 +260,45 @@ <h3>A Word About Garbage Collection</h3>
<p>This GC configuration will also emit helpful GC diagnostic information to the console to aid further
tuning and troubleshooting as appropriate for your deployment.
</p>

<h2>Configuring Cache expiry times and sizes</h2>
<h3>Core Openfire caches</h3>
<p>When clustering is enabled, the only way to change the size or expiry time of the standard Openfire
caches is to edit <code>plugins/hazelcast/classes/hazelcast-cache-config.xml</code> on every node in the
cluster and then shut down and restart every node. Dynamic configuration is not currently possible, and
using different configurations on different nodes is liable to lead to odd behaviour.</p>

<p>This is different to non-clustered caches, where it is sufficient to set the
<code>cache.[cache name].maxLifetime</code> and <code>cache.[cache name].size</code> System Properties.</p>
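
<p>For illustration only, these System Properties can also be set from code. The sketch below assumes a
hypothetical non-clustered cache named <code>my-test-cache</code> and Openfire's usual units of bytes and
milliseconds; it is a sketch, not part of this plugin.</p>

<pre>
// Illustrative sketch: tune a non-clustered cache via its System Properties.
// JiveGlobals is org.jivesoftware.util.JiveGlobals.
JiveGlobals.setProperty("cache.my-test-cache.size", String.valueOf(1024 * 1024));            // 1 MB
JiveGlobals.setProperty("cache.my-test-cache.maxLifetime", String.valueOf(10 * 60 * 1000));  // 10 minutes
</pre>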

<h3>Plugin defined caches</h3>
A plugin can create its own Cache without editing any configuration files. For example:

<pre>
final String cacheName = "my-test-cache";
final int maxCacheSize = 1024 * 1024;      // illustrative maximum size, in bytes
final long maxLifetime = 10 * 60 * 1000;   // illustrative maximum entry lifetime, in milliseconds
CacheFactory.setMaxSizeProperty(cacheName, maxCacheSize);
CacheFactory.setMaxLifetimeProperty(cacheName, maxLifetime);

final Cache&lt;JID, Instant&gt; cache = CacheFactory.createCache(cacheName);
</pre>

Notes:
<ul>
<li>
<code>CacheFactory.setMaxSizeProperty</code>/<code>CacheFactory.setMaxLifetimeProperty</code> will set Openfire
System Properties that are used to configure the Cache when it is created.
</li>
<li>
If no Openfire System Properties are set, the defaults are used: an unlimited size and a six-hour
lifetime.
</li>
<li>
The first node in the cluster to call <code>CacheFactory.createCache</code> will use the configured
expiry values; subsequent calls to <code>CacheFactory.createCache</code> simply retrieve the existing
Cache with the previously configured values. It is not possible to change the expiry values after the
Cache has been created.
</li>
</ul>
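
<p>Once created, the Cache can be used like any other Openfire Cache. The sketch below is purely
illustrative (the JID and values are hypothetical): entries put on one cluster node are visible to the
other nodes until they expire.</p>

<pre>
// Illustrative sketch: record when a user was last seen, using the cache created above.
final JID user = new JID("alice@example.org");
cache.put(user, Instant.now());

// Later, on this or any other cluster node (until the entry expires):
final Instant lastSeen = cache.get(user);
</pre>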

</body>
</html>
@@ -37,15 +37,14 @@
*/
public class ClusteredCache<K extends Serializable, V extends Serializable> implements Cache<K, V> {

private static Logger logger = LoggerFactory.getLogger(ClusteredCache.class);
private static final Logger logger = LoggerFactory.getLogger(ClusteredCache.class);

private final Set<String> listeners = ConcurrentHashMap.newKeySet();

/**
* The map is used for distributed operations such as get, put, etc.
*/
final IMap<K, V> map;
private final int hazelcastLifetimeInSeconds;
private String name;
private long numberOfGets = 0;

@@ -54,15 +53,13 @@ public class ClusteredCache<K extends Serializable, V extends Serializable> impl
*
* @param name a name for the cache, which should be unique per vm.
* @param cache the cache implementation
* @param hazelcastLifetimeInSeconds the lifetime of cache entries
*/
protected ClusteredCache(String name, IMap<K,V> cache, final int hazelcastLifetimeInSeconds) {
protected ClusteredCache(final String name, final IMap<K, V> cache) {
this.map = cache;
this.hazelcastLifetimeInSeconds = hazelcastLifetimeInSeconds;
this.name = name;
}

void addEntryListener(MapListener listener) {
void addEntryListener(final MapListener listener) {
listeners.add(map.addEntryListener(listener, false));
}

@@ -72,24 +69,24 @@ public String getName() {
}

@Override
public void setName(String name) {
public void setName(final String name) {
this.name = name;
}

@Override
public V put(K key, V object) {
public V put(final K key, final V object) {
if (object == null) { return null; }
return map.put(key, object, hazelcastLifetimeInSeconds, TimeUnit.SECONDS);
return map.put(key, object);
}

@Override
public V get(Object key) {
public V get(final Object key) {
numberOfGets++;
return map.get(key);
}

@Override
public V remove(Object key) {
public V remove(final Object key) {
return map.remove(key);
}

@@ -100,17 +97,17 @@ public void clear() {

@Override
public int size() {
LocalMapStats stats = map.getLocalMapStats();
final LocalMapStats stats = map.getLocalMapStats();
return (int) (stats.getOwnedEntryCount() + stats.getBackupEntryCount());
}

@Override
public boolean containsKey(Object key) {
public boolean containsKey(final Object key) {
return map.containsKey(key);
}

@Override
public boolean containsValue(Object value) {
public boolean containsValue(final Object value) {
return map.containsValue(value);
}

@@ -130,7 +127,7 @@ public Set<K> keySet() {
}

@Override
public void putAll(Map<? extends K, ? extends V> entries) {
public void putAll(final Map<? extends K, ? extends V> entries) {
map.putAll(entries);
}

@@ -146,13 +143,13 @@ public long getCacheHits() {

@Override
public long getCacheMisses() {
long hits = map.getLocalMapStats().getHits();
final long hits = map.getLocalMapStats().getHits();
return numberOfGets > hits ? numberOfGets - hits : 0;
}

@Override
public int getCacheSize() {
LocalMapStats stats = map.getLocalMapStats();
final LocalMapStats stats = map.getLocalMapStats();
return (int) (stats.getOwnedEntryMemoryCost() + stats.getBackupEntryMemoryCost());
}

@@ -162,7 +159,7 @@ public long getMaxCacheSize() {
}

@Override
public void setMaxCacheSize(int maxSize) {
public void setMaxCacheSize(final int maxSize) {
CacheFactory.setMaxSizeProperty(getName(), maxSize);
}

@@ -172,7 +169,7 @@ public long getMaxLifetime() {
}

@Override
public void setMaxLifetime(long maxLifetime) {
public void setMaxLifetime(final long maxLifetime) {
CacheFactory.setMaxLifetimeProperty(getName(), maxLifetime);
}

@@ -181,7 +178,7 @@ void destroy() {
map.destroy();
}

boolean lock(K key, long timeout) {
boolean lock(final K key, final long timeout) {
boolean result = true;
if (timeout < 0) {
map.lock(key);
@@ -190,18 +187,18 @@ boolean lock(K key, long timeout) {
} else {
try {
result = map.tryLock(key, timeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
} catch (final InterruptedException e) {
logger.error("Failed to get cluster lock", e);
result = false;
}
}
return result;
}

void unlock(K key) {
void unlock(final K key) {
try {
map.unlock(key);
} catch (IllegalMonitorStateException e) {
} catch (final IllegalMonitorStateException e) {
logger.error("Failed to release cluster lock", e);
}
}
@@ -257,12 +257,20 @@ public Cache createCache(final String name) {
// Determine the max cache size. Note that in Hazelcast the max cache size must be positive
final long openfireMaxCacheSize = CacheFactory.getMaxCacheSize(name);
final int hazelcastMaxCacheSize = openfireMaxCacheSize < 0 ? Integer.MAX_VALUE : (int) openfireMaxCacheSize;
final MapConfig mapConfig = hazelcast.getConfig().getMapConfig(name);
mapConfig.setTimeToLiveSeconds(hazelcastLifetimeInSeconds);
mapConfig.setMaxSizeConfig(new MaxSizeConfig(hazelcastMaxCacheSize, MaxSizeConfig.MaxSizePolicy.USED_HEAP_SIZE));
// It's only possible to create a dynamic config if a static one doesn't already exist
final MapConfig staticConfig = hazelcast.getConfig().getMapConfigOrNull(name);
if (staticConfig == null) {
final MapConfig dynamicConfig = new MapConfig(name);
dynamicConfig.setTimeToLiveSeconds(hazelcastLifetimeInSeconds);
dynamicConfig.setMaxSizeConfig(new MaxSizeConfig(hazelcastMaxCacheSize, MaxSizeConfig.MaxSizePolicy.USED_HEAP_SIZE));
logger.debug("Creating dynamic map config for cache={}, dynamicConfig={}", name, dynamicConfig);
hazelcast.getConfig().addMapConfig(dynamicConfig);
} else {
logger.debug("Static configuration already exists for cache={}, staticConfig={}", name, staticConfig);
}
// TODO: Better genericize this method in CacheFactoryStrategy so we can stop suppressing this warning
@SuppressWarnings("unchecked")
final ClusteredCache clusteredCache = new ClusteredCache(name, hazelcast.getMap(name), hazelcastLifetimeInSeconds);
final ClusteredCache clusteredCache = new ClusteredCache(name, hazelcast.getMap(name));
return clusteredCache;
}

