Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP

Loading…

Issue #110: Fixed MaxSizeHeapPolicy and MaxSizeHeapPercentagePolicy. #134

Closed
wants to merge 1 commit into from

2 participants

dvinas Mehmet Dogan
dvinas

I made the following changes to CMap.java:

- Added a new method, cost(), which determines the current cost of the map. (I tried to maintain a counter instead of iterating through the map, but records change size unpredictably, or have their values nulled before reindexing.)
- Fixed the implementations of both MaxSizeHeapPolicy and MaxSizeHeapPercentagePolicy using cost().
- Refactored startCleanup() so that it does not call MapMaxSizePolicy.overCapacity() a second time when it is invoked from CMap.overCapacity() -> ConcurrentMapManager.executeCleanup() (indicated by forced=true). Removed redundant boolean logic.
-Added cost calculation to log message in startCleanup().

Mehmet Dogan
Owner

Thanks for the patch.

The problem here is that CMap.overCapacity() is called before every put operation (PutOperationHandler) to check whether the map exceeds its defined capacity, and iterating through all records on each call is too costly.

Instead, we can store the total cost calculated on every cleanup cycle in a volatile field in CMap and use that value during the over-capacity check. I don't think it needs to be perfectly accurate.

Mehmet Dogan mdogan closed this
Mehmet Dogan
Owner

See commit for #110

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Commits on Apr 24, 2012
  1. dvinas
This page is out of date. Refresh to see the latest.
Showing with 29 additions and 11 deletions.
  1. +29 −11 hazelcast/src/main/java/com/hazelcast/impl/CMap.java
40 hazelcast/src/main/java/com/hazelcast/impl/CMap.java
View
@@ -1033,6 +1033,18 @@ public int size(int expectedPartitionVersion) {
return size;
}
+ //Note: Calling this is potentially expensive, so we shouldn't make it public.
+ private long cost() {
+ long cost = 0L;
+ long now = System.currentTimeMillis();
+ for (Record record : mapRecords.values()) {
+ if (record.isActive() && record.isValid(now)) {
+ cost += record.getCost();
+ }
+ }
+ return cost;
+ }
+
public void collectScheduledLocks(Map<Object, DistributedLock> lockOwners, Map<Object, DistributedLock> lockRequested) {
Collection<Record> records = mapRecords.values();
for (Record record : records) {
@@ -1214,31 +1226,34 @@ public MaxSizeConfig getMaxSizeConfig() {
MaxSizeHeapPolicy(MaxSizeConfig maxSizeConfig) {
super(maxSizeConfig);
- memoryLimit = maxSizeConfig.getSize() * 1000 * 1000; // MB to byte
+ memoryLimit = maxSizeConfig.getSize() * 1024 * 1024; // MB to byte
}
public boolean overCapacity() {
- boolean over = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) > memoryLimit;
+ long cost = cost();
+ boolean over = cost > memoryLimit;
if (over) {
- Runtime.getRuntime().gc();
+ logger.log(Level.FINEST, "Map " + getName() + " is over-capacity (" + cost + "/" + memoryLimit + " bytes)");
}
return over;
}
}
class MaxSizeHeapPercentagePolicy extends MaxSizePerJVMPolicy {
+ int maxPercent = 0;
MaxSizeHeapPercentagePolicy(MaxSizeConfig maxSizeConfig) {
super(maxSizeConfig);
+ maxPercent = maxSizeConfig.getSize();
}
public boolean overCapacity() {
- long total = Runtime.getRuntime().totalMemory();
- long free = Runtime.getRuntime().freeMemory();
- int usedPercentage = (int) (((total - free) / total) * 100D);
- boolean over = usedPercentage > maxSizeConfig.getSize();
+ long total = Runtime.getRuntime().maxMemory();
+ long cost = cost();
+ int usedPercent = (int)(((float)cost / total) * 100);
+ boolean over = usedPercent >= maxPercent;
if (over) {
- Runtime.getRuntime().gc();
+ logger.log(Level.FINEST, "Map " + getName() + " is over-capacity (" + usedPercent + "/" + maxPercent + "% of heap)");
}
return over;
}
@@ -1300,10 +1315,11 @@ boolean startCleanup(boolean forced) {
final Set<Record> recordsToEvict = new HashSet<Record>();
final Set<Record> sortedRecords = new TreeSet<Record>(new ComparatorWrapper(evictionComparator));
final Collection<Record> records = mapRecords.values();
- final boolean overCapacity = maxSizePolicy != null && maxSizePolicy.overCapacity();
+ final boolean overCapacity = forced || (maxSizePolicy != null && maxSizePolicy.overCapacity());
final boolean evictionAware = evictionComparator != null && overCapacity;
int recordsStillOwned = 0;
int backupPurgeCount = 0;
+ long totalCost = 0L;
PartitionManager partitionManager = concurrentMapManager.partitionManager;
for (Record record : records) {
PartitionInfo partition = partitionManager.getPartition(record.getBlockId());
@@ -1335,9 +1351,10 @@ boolean startCleanup(boolean forced) {
} else {
recordsUnknown.add(record);
}
+ totalCost += record.getCost();
}
}
- if (evictionAware && (forced || overCapacity)) {
+ if (evictionAware) {
int numberOfRecordsToEvict = (int) (recordsStillOwned * evictionRate);
int evictedCount = 0;
for (Record record : sortedRecords) {
@@ -1360,7 +1377,8 @@ boolean startCleanup(boolean forced) {
+ ", backupPurge:" + backupPurgeCount
);
logger.log(levelLog, thisAddress + " mapRecords: " + mapRecords.size()
- + " indexes: " + mapIndexService.getOwnedRecords().size());
+ + " indexes: " + mapIndexService.getOwnedRecords().size()
+ + " cost(bytes): " + totalCost);
}
executeStoreUpdate(recordsDirty);
executeEviction(recordsToEvict);
Something went wrong with that request. Please try again.