From 4e628fe96f88803059dcd39f5014af8f83b351bf Mon Sep 17 00:00:00 2001
From: Vinoth Chandar
Date: Tue, 10 Jul 2012 11:12:52 -0700
Subject: [PATCH] add minimumSharedCache param + more tests.

---
 .../voldemort/server/VoldemortConfig.java        |  10 +
 .../admin/AdminServiceRequestHandler.java        |   1 +
 src/java/voldemort/store/StoreDefinition.java    |   8 +-
 .../store/bdb/BdbStorageConfiguration.java       |  50 +++-
 src/java/voldemort/utils/ByteUtils.java          |   3 +
 .../store/bdb/BdbCachePartitioningTest.java      | 259 +++++++++++++++---
 6 files changed, 285 insertions(+), 46 deletions(-)

diff --git a/src/java/voldemort/server/VoldemortConfig.java b/src/java/voldemort/server/VoldemortConfig.java
index 6f81db581d..3fc9ffc154 100644
--- a/src/java/voldemort/server/VoldemortConfig.java
+++ b/src/java/voldemort/server/VoldemortConfig.java
@@ -86,6 +86,7 @@ public class VoldemortConfig implements Serializable {
     private boolean bdbFairLatches;
     private long bdbStatsCacheTtlMs;
     private boolean bdbExposeSpaceUtilization;
+    private long bdbMinimumSharedCache;

     private String mysqlUsername;
     private String mysqlPassword;
@@ -228,6 +229,7 @@ public VoldemortConfig(Props props) {
         this.bdbReadUncommitted = props.getBoolean("bdb.lock.read_uncommitted", true);
         this.bdbStatsCacheTtlMs = props.getLong("bdb.stats.cache.ttl.ms", 5 * Time.MS_PER_SECOND);
         this.bdbExposeSpaceUtilization = props.getBoolean("bdb.expose.space.utilization", true);
+        this.bdbMinimumSharedCache = props.getLong("bdb.minimum.shared.cache", 0);

         this.readOnlyBackups = props.getInt("readonly.backups", 1);
         this.readOnlySearchStrategy = props.getString("readonly.search.strategy",
@@ -1167,6 +1169,14 @@ public void setBdbStatsCacheTtlMs(long statsCacheTtlMs) {
         this.bdbStatsCacheTtlMs = statsCacheTtlMs;
     }

+    public long getBdbMinimumSharedCache() {
+        return this.bdbMinimumSharedCache;
+    }
+
+    public void setBdbMinimumSharedCache(long minimumSharedCache) {
+        this.bdbMinimumSharedCache = minimumSharedCache;
+    }
+
     public int getSchedulerThreads() {
         return schedulerThreads;
     }
diff --git a/src/java/voldemort/server/protocol/admin/AdminServiceRequestHandler.java b/src/java/voldemort/server/protocol/admin/AdminServiceRequestHandler.java
index 28f10c074c..51f91980c1 100644
--- a/src/java/voldemort/server/protocol/admin/AdminServiceRequestHandler.java
+++ b/src/java/voldemort/server/protocol/admin/AdminServiceRequestHandler.java
@@ -1510,6 +1510,7 @@ public VAdminProto.ReserveMemoryResponse handleReserveMemory(VAdminProto.Reserve

                 storeDefList.set(i, newStoreDef);
                 storageService.updateStore(newStoreDef);
+                break;
             }
         }
diff --git a/src/java/voldemort/store/StoreDefinition.java b/src/java/voldemort/store/StoreDefinition.java
index 2c93d64eb4..ab9593328b 100644
--- a/src/java/voldemort/store/StoreDefinition.java
+++ b/src/java/voldemort/store/StoreDefinition.java
@@ -388,7 +388,8 @@ && getRoutingPolicy() == def.getRoutingPolicy()
                               def.getSerializerFactory() != null ? def.getSerializerFactory() : null)
                && Objects.equal(getHintedHandoffStrategyType(), def.getHintedHandoffStrategyType())
-               && Objects.equal(getHintPrefListSize(), def.getHintPrefListSize());
+               && Objects.equal(getHintPrefListSize(), def.getHintPrefListSize())
+               && Objects.equal(getMemoryFootprintMB(), def.getMemoryFootprintMB());
     }

     @Override
@@ -419,7 +420,8 @@ public int hashCode() {
                                 hasHintedHandoffStrategyType() ? getHintedHandoffStrategyType() : null,
                                 hasHintPreflistSize() ? getHintPrefListSize() : null,
-                                getOwners());
+                                getOwners(),
+                                getMemoryFootprintMB());
     }

     @Override
@@ -439,6 +441,6 @@ public String toString() {
                + getZoneCountWrites() + ", serializer factory = " + getSerializerFactory() + ")"
                + ", hinted-handoff-strategy = " + getHintedHandoffStrategyType()
                + ", hint-preflist-size = " + getHintPrefListSize() + ", owners = " + getOwners()
-               + ")";
+               + ", memory-footprint(MB) = " + getMemoryFootprintMB() + ")";
     }
 }
diff --git a/src/java/voldemort/store/bdb/BdbStorageConfiguration.java b/src/java/voldemort/store/bdb/BdbStorageConfiguration.java
index 1bd6f3caf9..bf956e16c6 100644
--- a/src/java/voldemort/store/bdb/BdbStorageConfiguration.java
+++ b/src/java/voldemort/store/bdb/BdbStorageConfiguration.java
@@ -32,6 +32,7 @@ import voldemort.store.StorageInitializationException;
 import voldemort.store.StoreDefinition;
 import voldemort.utils.ByteArray;
+import voldemort.utils.ByteUtils;
 import voldemort.utils.JmxUtils;
 import voldemort.utils.Time;
@@ -58,8 +59,6 @@ public class BdbStorageConfiguration implements StorageConfiguration {
     private static final String SHARED_ENV_KEY = "shared";
     private static Logger logger = Logger.getLogger(BdbStorageConfiguration.class);
-    private static final long BYTES_PER_MB = 1048576;
-
     private final Object lock = new Object();
     private final Map environments = Maps.newHashMap();
     private final EnvironmentConfig environmentConfig;
@@ -67,7 +66,6 @@ public class BdbStorageConfiguration implements StorageConfiguration {
     private final String bdbMasterDir;
     private final boolean useOneEnvPerStore;
     private final VoldemortConfig voldemortConfig;
-    private final long totalCacheSize;
     private long reservedCacheSize = 0;
     private Set unreservedStores;
@@ -75,7 +73,6 @@ public BdbStorageConfiguration(VoldemortConfig config) {
         this.voldemortConfig = config;
         environmentConfig = new EnvironmentConfig();
         environmentConfig.setTransactional(true);
-        totalCacheSize = config.getBdbCacheSize();
         if(config.isBdbWriteTransactionsEnabled() && config.isBdbFlushTransactionsEnabled()) {
             environmentConfig.setDurability(Durability.COMMIT_SYNC);
         } else if(config.isBdbWriteTransactionsEnabled() && !config.isBdbFlushTransactionsEnabled()) {
@@ -155,7 +152,7 @@ public StorageEngine getStore(StoreDefinition storeDe
      *
      */
     private void adjustCacheSizes() {
-        long newSharedCacheSize = this.totalCacheSize - this.reservedCacheSize;
+        long newSharedCacheSize = voldemortConfig.getBdbCacheSize() - this.reservedCacheSize;
         logger.info("Setting the shared cache size to " + newSharedCacheSize);
         for(Environment environment: unreservedStores) {
             EnvironmentMutableConfig mConfig = environment.getMutableConfig();
@@ -180,14 +177,27 @@ public Environment getEnvironment(StoreDefinition storeDef) throws DatabaseExcep
         // configure the BDB cache
         if(storeDef.hasMemoryFootprint()) {
             // make room for the reservation, by adjusting other stores
-            long reservedBytes = storeDef.getMemoryFootprintMB() * BYTES_PER_MB;
-            this.reservedCacheSize += reservedBytes;
+            long reservedBytes = storeDef.getMemoryFootprintMB() * ByteUtils.BYTES_PER_MB;
+            long newReservedCacheSize = this.reservedCacheSize + reservedBytes;
+
+            // check that we leave a 'minimum' shared cache
+            if((voldemortConfig.getBdbCacheSize() - newReservedCacheSize) < voldemortConfig.getBdbMinimumSharedCache()) {
+                throw new StorageInitializationException("Reservation of "
+                                                         + storeDef.getMemoryFootprintMB()
+                                                         + " MB for store "
+                                                         + storeName
+                                                         + " violates minimum shared cache size of "
+                                                         + voldemortConfig.getBdbMinimumSharedCache());
+            }
+
+            this.reservedCacheSize = newReservedCacheSize;
             adjustCacheSizes();
             environmentConfig.setSharedCache(false);
             environmentConfig.setCacheSize(reservedBytes);
         } else {
             environmentConfig.setSharedCache(true);
-            environmentConfig.setCacheSize(this.totalCacheSize - this.reservedCacheSize);
+            environmentConfig.setCacheSize(voldemortConfig.getBdbCacheSize()
+                                           - this.reservedCacheSize);
         }
         Environment environment = new Environment(bdbDir, environmentConfig);
@@ -307,14 +317,28 @@ public void update(StoreDefinition storeDef) {
         if(!useOneEnvPerStore)
             throw new VoldemortException("Memory foot print can be set only when using different environments per store");
-        Environment environment = environments.get(storeDef.getName());
+        String storeName = storeDef.getName();
+        Environment environment = environments.get(storeName);
         // change reservation amount of reserved store
         if(!unreservedStores.contains(environment) && storeDef.hasMemoryFootprint()) {
             EnvironmentMutableConfig mConfig = environment.getMutableConfig();
             long currentCacheSize = mConfig.getCacheSize();
-            long newCacheSize = storeDef.getMemoryFootprintMB() * BYTES_PER_MB;
+            long newCacheSize = storeDef.getMemoryFootprintMB() * ByteUtils.BYTES_PER_MB;
             if(currentCacheSize != newCacheSize) {
-                this.reservedCacheSize = this.reservedCacheSize - currentCacheSize + newCacheSize;
+                long newReservedCacheSize = this.reservedCacheSize - currentCacheSize
+                                            + newCacheSize;
+
+                // check that we leave a 'minimum' shared cache
+                if((voldemortConfig.getBdbCacheSize() - newReservedCacheSize) < voldemortConfig.getBdbMinimumSharedCache()) {
+                    throw new StorageInitializationException("Reservation of "
+                                                             + storeDef.getMemoryFootprintMB()
+                                                             + " MB for store "
+                                                             + storeName
+                                                             + " violates minimum shared cache size of "
+                                                             + voldemortConfig.getBdbMinimumSharedCache());
+                }
+
+                this.reservedCacheSize = newReservedCacheSize;
                 adjustCacheSizes();
                 mConfig.setCacheSize(newCacheSize);
                 environment.setMutableConfig(mConfig);
@@ -327,4 +351,8 @@ public void update(StoreDefinition storeDef) {
             throw new VoldemortException("Cannot switch between shared and private cache dynamically");
         }
     }
+
+    public long getReservedCacheSize() {
+        return this.reservedCacheSize;
+    }
 }
diff --git a/src/java/voldemort/utils/ByteUtils.java b/src/java/voldemort/utils/ByteUtils.java
index c58fc47680..4fb375458a 100644
--- a/src/java/voldemort/utils/ByteUtils.java
+++ b/src/java/voldemort/utils/ByteUtils.java
@@ -53,6 +53,9 @@ public class ByteUtils {
     public static final int MASK_00111111 = Integer.parseInt("00111111", 2);
     public static final int MASK_00011111 = Integer.parseInt("00011111", 2);

+    public static final int BYTES_PER_MB = 1048576;
+    public static final long BYTES_PER_GB = 1073741824;
+
     public static MessageDigest getDigest(String algorithm) {
         try {
             return MessageDigest.getInstance(algorithm);
diff --git a/test/unit/voldemort/store/bdb/BdbCachePartitioningTest.java b/test/unit/voldemort/store/bdb/BdbCachePartitioningTest.java
index 7fc931c86d..5f4fa0d110 100644
--- a/test/unit/voldemort/store/bdb/BdbCachePartitioningTest.java
+++ b/test/unit/voldemort/store/bdb/BdbCachePartitioningTest.java
@@ -24,10 +24,16 @@ import voldemort.TestUtils;
 import voldemort.server.VoldemortConfig;
+import voldemort.store.StorageInitializationException;
 import voldemort.store.StoreDefinition;
+import voldemort.utils.ByteUtils;
 import voldemort.utils.Props;
 import voldemort.versioning.Versioned;

+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+
 /**
  * checks that BDB cache partitioning works and caches stay within limits
  *
@@ -55,6 +61,21 @@ protected void tearDown() throws Exception {
         }
     }

+    private EnvironmentStats getStats(Environment environment) {
+        StatsConfig config = new StatsConfig();
+        config.setFast(true);
+        return environment.getStats(config);
+    }
+
+    private long getAndCheckCacheSize(BdbStorageEngine engine, StoreDefinition storeDef, String key) {
+        engine.get(TestUtils.toByteArray(key), null);
+        return getStats(bdbStorage.getEnvironment(storeDef)).getCacheTotalBytes();
+    }
+
+    private long getCacheSize(StoreDefinition storeDef) {
+        return getStats(bdbStorage.getEnvironment(storeDef)).getCacheTotalBytes();
+    }
+
     /**
      * Tests that, given no data completely fits in memory (realistic prod
      * conditions), stores will stay within their limits, no matter how much
      * traffic is thrown at them
      */
     public void testStaticPrivateCaches() {

-        int totalCache = 20 * 1024 * 1024; // total cache size
-        int shareA = 10 * 1024 * 1024;// A reserves 10MB
-        int shareB = 5 * 1024 * 1024;// B reserves 5MB
+        int totalCache = 20 * ByteUtils.BYTES_PER_MB; // total cache size
+        int shareA = 10 * ByteUtils.BYTES_PER_MB;// A reserves 10MB
+        int shareB = 5 * ByteUtils.BYTES_PER_MB;// B reserves 5MB
         int shareC = totalCache - shareA - shareB; // the rest, 5 MB
         int numRecords = 40;
@@ -78,18 +99,28 @@ public void testStaticPrivateCaches() {
         voldemortConfig.setBdbDataDirectory(bdbMasterDir.toURI().getPath());
         bdbStorage = new BdbStorageConfiguration(voldemortConfig);
-        StoreDefinition defA = TestUtils.makeStoreDefinition("storeA", shareA / (1024 * 1024));
+        StoreDefinition defA = TestUtils.makeStoreDefinition("storeA", shareA
+                                                                       / (ByteUtils.BYTES_PER_MB));
         BdbStorageEngine storeA = (BdbStorageEngine) bdbStorage.getStore(defA);

-        StoreDefinition defB = TestUtils.makeStoreDefinition("storeB", shareB / (1024 * 1024));
+        StoreDefinition defB = TestUtils.makeStoreDefinition("storeB", shareB
+                                                                       / (ByteUtils.BYTES_PER_MB));
         BdbStorageEngine storeB = (BdbStorageEngine) bdbStorage.getStore(defB);

         StoreDefinition defC = TestUtils.makeStoreDefinition("storeC");
         BdbStorageEngine storeC = (BdbStorageEngine) bdbStorage.getStore(defC);

+        // before any traffic, the cache will not have grown
+        assertTrue(Math.abs(shareA - getCacheSize(defA)) > ByteUtils.BYTES_PER_MB);
+        assertTrue(Math.abs(shareB - getCacheSize(defB)) > ByteUtils.BYTES_PER_MB);
+
+        // sharedCacheSize reading 0 confirms that the store has a private cache
+        assertEquals(0, getStats(bdbStorage.getEnvironment(defA)).getSharedCacheTotalBytes());
+        assertEquals(0, getStats(bdbStorage.getEnvironment(defB)).getSharedCacheTotalBytes());
+
         // load data into the stores; each store is guaranteed to be ~ 40MB.
         // Data won't fit in memory
-        byte[] value = new byte[1024 * 1024];
+        byte[] value = new byte[ByteUtils.BYTES_PER_MB];
         for(int i = 0; i < numRecords; i++) {
             storeA.put(TestUtils.toByteArray("testKey" + i), new Versioned(value), null);
             storeB.put(TestUtils.toByteArray("testKey" + i), new Versioned(value), null);
@@ -98,45 +129,209 @@ public void testStaticPrivateCaches() {
             storeC.put(TestUtils.toByteArray("testKey" + i), new Versioned(value), null);
         }

         // we will bring all of that data into the cache, by doing a keywalk.
         // This should expand the cache as much as possible
-        for(int i = 0; i < numRecords; i++) {
-            storeA.get(TestUtils.toByteArray("testKey" + i), null);
-            storeB.get(TestUtils.toByteArray("testKey" + i), null);
-            storeC.get(TestUtils.toByteArray("testKey" + i), null);
-        }
+        long cacheSizeA = Long.MIN_VALUE;
+        long cacheSizeB = Long.MIN_VALUE;
+        long cacheSizeC = Long.MIN_VALUE;

-        long cacheSizeA = bdbStorage.getEnvironment(defA).getConfig().getCacheSize();
-        long cacheSizeB = bdbStorage.getEnvironment(defB).getConfig().getCacheSize();
-        long cacheSizeC = bdbStorage.getEnvironment(defC).getConfig().getCacheSize();
+        for(int cycle = 0; cycle < 10; cycle++) {
+            for(int i = 0; i < numRecords; i++) {
+                long cycleCacheSizeA = getAndCheckCacheSize(storeA, defA, "testKey" + i);
+                long cycleCacheSizeB = getAndCheckCacheSize(storeB, defB, "testKey" + i);
+                long cycleCacheSizeC = getAndCheckCacheSize(storeC, defC, "testKey" + i);
+                // record the maximum cache size each store ever grew to
+                cacheSizeA = (cycleCacheSizeA > cacheSizeA) ? cycleCacheSizeA : cacheSizeA;
+                cacheSizeB = (cycleCacheSizeB > cacheSizeB) ? cycleCacheSizeB : cacheSizeB;
+                cacheSizeC = (cycleCacheSizeC > cacheSizeC) ? cycleCacheSizeC : cacheSizeC;
+            }
+        }

-        // check that they are certainly equal to expected limits. This should
-        // be true since the cache would definitely expand enough
-        assertTrue(cacheSizeA >= shareA);
-        assertTrue(cacheSizeB >= shareB);
-        assertTrue(cacheSizeC >= shareC);
+        // check that they are certainly less than expected limits.
+        assertTrue(cacheSizeA <= shareA);
+        assertTrue(cacheSizeB <= shareB);
+        assertTrue(cacheSizeC <= shareC);

         // check that they are not exceedingly high than their limits. Small
-        // overflows are expected. But should not be more than a 1MB
-        assertTrue((cacheSizeA - (shareA)) <= (1024 * 1024));
-        assertTrue((cacheSizeB - (shareB)) <= (1024 * 1024));
-        assertTrue((cacheSizeC - (shareC)) <= (1024 * 1024));
+        // overflows are okay, but should not be more than 1MB
+        assertTrue(Math.abs(cacheSizeA - shareA) <= ByteUtils.BYTES_PER_MB);
+        assertTrue(Math.abs(cacheSizeB - shareB) <= ByteUtils.BYTES_PER_MB);
+        assertTrue(Math.abs(cacheSizeC - shareC) <= ByteUtils.BYTES_PER_MB);

         // try doing reads on store C alone, for which we have no reservations.
-        // The other stores should not shrink. This simulates a spike on one
-        // store
-        for(int cycle = 0; cycle < 2; cycle++) {
+        // This simulates a spike on one store
+        long cacheSizeCNow = Long.MIN_VALUE;
+        for(int cycle = 0; cycle < 10; cycle++) {
             for(int i = 0; i < numRecords; i++) {
-                storeC.get(TestUtils.toByteArray("testKey" + i), null);
+                long cycleCacheSizeCNow = getAndCheckCacheSize(storeC, defC, "testKey" + i);
+                // record the maximum cache size, each store grew to
+                cacheSizeCNow = (cycleCacheSizeCNow > cacheSizeCNow) ? cycleCacheSizeCNow
+                                                                     : cacheSizeCNow;
             }
         }

-        long cacheSizeANow = bdbStorage.getEnvironment(defA).getConfig().getCacheSize();
-        long cacheSizeBNow = bdbStorage.getEnvironment(defB).getConfig().getCacheSize();
-
-        assertTrue(cacheSizeA == cacheSizeANow);
-        assertTrue(cacheSizeB == cacheSizeBNow);
+        assertTrue(cacheSizeCNow <= shareC);

         storeA.close();
         storeB.close();
         storeC.close();
     }
+
+    /**
+     * Tests that any reservation that would violate the minimum shared cache
+     * will fail, during server startup and dynamic update
+     */
+    public void testMinimumSharedCache() {
+        int totalCache = 20 * ByteUtils.BYTES_PER_MB; // total cache size
+        int shareA = 10 * ByteUtils.BYTES_PER_MB;// A reserves 10MB
+
+        // let's use all the default values.
+        Props props = new Props();
+        props.put("node.id", 1);
+        props.put("voldemort.home", "test/common/voldemort/config");
+        VoldemortConfig voldemortConfig = new VoldemortConfig(props);
+        voldemortConfig.setBdbCacheSize(totalCache);
+        voldemortConfig.setBdbOneEnvPerStore(true);
+        voldemortConfig.setBdbDataDirectory(bdbMasterDir.toURI().getPath());
+        voldemortConfig.setBdbMinimumSharedCache(15 * ByteUtils.BYTES_PER_MB);
+
+        BdbStorageEngine storeA = null;
+        bdbStorage = new BdbStorageConfiguration(voldemortConfig);
+        assertEquals(0, bdbStorage.getReservedCacheSize());
+
+        try {
+            StoreDefinition defA = TestUtils.makeStoreDefinition("storeA", shareA
+                                                                           / ByteUtils.BYTES_PER_MB);
+            storeA = (BdbStorageEngine) bdbStorage.getStore(defA);
+            fail("Should have thrown exception since minSharedCache will be violated");
+        } catch(StorageInitializationException sie) {
+            // should come here.
+        }
+        // failing operations should not alter reserved cache size
+        assertEquals(0, bdbStorage.getReservedCacheSize());
+
+        voldemortConfig.setBdbMinimumSharedCache(10 * ByteUtils.BYTES_PER_MB);
+        bdbStorage = new BdbStorageConfiguration(voldemortConfig);
+        try {
+            StoreDefinition defA = TestUtils.makeStoreDefinition("storeA", shareA
+                                                                           / ByteUtils.BYTES_PER_MB);
+            storeA = (BdbStorageEngine) bdbStorage.getStore(defA);
+        } catch(StorageInitializationException sie) {
+            // should not come here.
+            fail("minSharedCache shouldn't have been violated");
+        }
+        assertEquals(shareA, bdbStorage.getReservedCacheSize());
+
+        long reserveCacheSize = bdbStorage.getReservedCacheSize();
+        // now, try increasing the reservation dynamically and it should fail
+        try {
+            StoreDefinition defA = TestUtils.makeStoreDefinition("storeA", 15);
+            bdbStorage.update(defA);
+            fail("Should have thrown exception since minSharedCache will be violated");
+        } catch(StorageInitializationException sie) {
+            // should come here.
+        }
+        // this failure cannot alter the reservedCacheSize
+        assertEquals(reserveCacheSize, bdbStorage.getReservedCacheSize());
+
+        if(storeA != null)
+            storeA.close();
+    }
+
+    public void testDynamicReservations() {
+        int totalCache = 20 * ByteUtils.BYTES_PER_MB; // total cache size
+        int shareA = 10 * ByteUtils.BYTES_PER_MB;// A reserves 10MB
+        int shareB = totalCache - shareA;
+        int numRecords = 40;
+
+        // let's use all the default values.
+        Props props = new Props();
+        props.put("node.id", 1);
+        props.put("voldemort.home", "test/common/voldemort/config");
+        VoldemortConfig voldemortConfig = new VoldemortConfig(props);
+        voldemortConfig.setBdbCacheSize(totalCache);
+        voldemortConfig.setBdbOneEnvPerStore(true);
+        voldemortConfig.setBdbDataDirectory(bdbMasterDir.toURI().getPath());
+        voldemortConfig.setBdbMinimumSharedCache(5 * ByteUtils.BYTES_PER_MB);
+
+        bdbStorage = new BdbStorageConfiguration(voldemortConfig);
+        StoreDefinition defA = TestUtils.makeStoreDefinition("storeA", shareA / (1024 * 1024));
+        BdbStorageEngine storeA = (BdbStorageEngine) bdbStorage.getStore(defA);
+
+        StoreDefinition defB = TestUtils.makeStoreDefinition("storeB");
+        BdbStorageEngine storeB = (BdbStorageEngine) bdbStorage.getStore(defB);
+
+        // load data into the stores; each store is guaranteed to be ~ 40MB.
+        // Data won't fit in memory
+        byte[] value = new byte[ByteUtils.BYTES_PER_MB];
+        for(int i = 0; i < numRecords; i++) {
+            storeA.put(TestUtils.toByteArray("testKey" + i), new Versioned(value), null);
+            storeB.put(TestUtils.toByteArray("testKey" + i), new Versioned(value), null);
+        }
+
+        // 1. start with 10MB reserved cache for A and the rest 10MB for B
+        long cacheSizeA = Long.MIN_VALUE;
+        long cacheSizeB = Long.MIN_VALUE;
+
+        for(int cycle = 0; cycle < 10; cycle++) {
+            for(int i = 0; i < numRecords; i++) {
+                long cycleCacheSizeA = getAndCheckCacheSize(storeA, defA, "testKey" + i);
+                long cycleCacheSizeB = getAndCheckCacheSize(storeB, defB, "testKey" + i);
+                // record the maximum cache size each store ever grew to
+                cacheSizeA = (cycleCacheSizeA > cacheSizeA) ? cycleCacheSizeA : cacheSizeA;
+                cacheSizeB = (cycleCacheSizeB > cacheSizeB) ? cycleCacheSizeB : cacheSizeB;
+            }
+        }
+
+        assertTrue(Math.abs(cacheSizeA - shareA) <= ByteUtils.BYTES_PER_MB);
+        assertTrue(Math.abs(cacheSizeB - shareB) <= ByteUtils.BYTES_PER_MB);
+
+        // 2. dynamically grow the cache to 15MB and watch B shrink.
+        shareA = 15 * ByteUtils.BYTES_PER_MB;
+        shareB = totalCache - shareA;
+        defA = TestUtils.makeStoreDefinition("storeA", shareA / (1024 * 1024));
+        bdbStorage.update(defA);
+
+        cacheSizeA = Long.MIN_VALUE;
+        cacheSizeB = Long.MIN_VALUE;
+
+        for(int cycle = 0; cycle < 10; cycle++) {
+            for(int i = 0; i < numRecords; i++) {
+                long cycleCacheSizeA = getAndCheckCacheSize(storeA, defA, "testKey" + i);
+                long cycleCacheSizeB = getAndCheckCacheSize(storeB, defB, "testKey" + i);
+                // record the maximum cache size each store ever grew to
+                cacheSizeA = (cycleCacheSizeA > cacheSizeA) ? cycleCacheSizeA : cacheSizeA;
+                cacheSizeB = (cycleCacheSizeB > cacheSizeB) ? cycleCacheSizeB : cacheSizeB;
+            }
+        }
+
+        assertTrue(Math.abs(cacheSizeA - shareA) <= ByteUtils.BYTES_PER_MB);
+        assertTrue(Math.abs(cacheSizeB - shareB) <= ByteUtils.BYTES_PER_MB);
+
+        // 3. dynamically shrink it back to 10MB and watch B expand again.
+        shareA = 10 * ByteUtils.BYTES_PER_MB;
+        shareB = totalCache - shareA;
+        defA = TestUtils.makeStoreDefinition("storeA", shareA / (1024 * 1024));
+        bdbStorage.update(defA);
+
+        cacheSizeA = Long.MIN_VALUE;
+        cacheSizeB = Long.MIN_VALUE;
+
+        for(int cycle = 0; cycle < 10; cycle++) {
+            for(int i = 0; i < numRecords; i++) {
+                long cycleCacheSizeA = getAndCheckCacheSize(storeA, defA, "testKey" + i);
+                long cycleCacheSizeB = getAndCheckCacheSize(storeB, defB, "testKey" + i);
+                // record the maximum cache size each store ever grew to
+                cacheSizeA = (cycleCacheSizeA > cacheSizeA) ? cycleCacheSizeA : cacheSizeA;
+                cacheSizeB = (cycleCacheSizeB > cacheSizeB) ? cycleCacheSizeB : cacheSizeB;
+            }
+        }
+
+        // check that they are not exceedingly higher than their limits. Small
+        // overflows are expected, but should not be more than 1MB
+        assertTrue(Math.abs(cacheSizeA - shareA) <= ByteUtils.BYTES_PER_MB);
+        assertTrue(Math.abs(cacheSizeB - shareB) <= ByteUtils.BYTES_PER_MB);
+
+        storeA.close();
+        storeB.close();
+    }
+
 }
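
Usage sketch (illustrative only; it does not go beyond what the diff and the tests above already show). The invariant the patch enforces is that the configured BDB cache size minus the sum of per-store reservations must stay at or above the new bdb.minimum.shared.cache property (default 0, i.e. no floor). The check runs both when a store carrying a memory footprint is first opened and when its reservation is changed later through BdbStorageConfiguration.update(). TestUtils.makeStoreDefinition is the test-only helper used above to build a store definition with a memory footprint in MB.

    // 20MB total BDB cache, of which at least 5MB must remain shared
    Props props = new Props();
    props.put("node.id", 1);
    props.put("voldemort.home", "test/common/voldemort/config");
    VoldemortConfig config = new VoldemortConfig(props);
    config.setBdbCacheSize(20 * ByteUtils.BYTES_PER_MB);
    config.setBdbOneEnvPerStore(true);
    config.setBdbMinimumSharedCache(5 * ByteUtils.BYTES_PER_MB);

    BdbStorageConfiguration bdbStorage = new BdbStorageConfiguration(config);

    // a 10MB reservation leaves 20 - 10 = 10MB shared (>= 5MB minimum), so this succeeds
    bdbStorage.getStore(TestUtils.makeStoreDefinition("storeA", 10));

    // growing the reservation to 16MB would leave only 4MB shared, so update()
    // throws and the previous reservation is left untouched
    try {
        bdbStorage.update(TestUtils.makeStoreDefinition("storeA", 16));
    } catch(StorageInitializationException expected) {
        assert bdbStorage.getReservedCacheSize() == 10 * ByteUtils.BYTES_PER_MB;
    }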