From 2d5208b1bce38dcb497b906b8a02201356946870 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Thu, 8 Jun 2023 17:50:07 +0000 Subject: [PATCH] Add option to insert items to first free tier instead of always inserting to topmost tier --- cachelib/allocator/CacheAllocator-inl.h | 27 ++++++++++++++++++----- cachelib/allocator/CacheAllocator.h | 8 ++++++- cachelib/allocator/CacheAllocatorConfig.h | 15 +++++++++++++ cachelib/cachebench/cache/Cache-inl.h | 2 ++ cachelib/cachebench/util/CacheConfig.cpp | 2 ++ cachelib/cachebench/util/CacheConfig.h | 2 ++ 6 files changed, 49 insertions(+), 7 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index 5147159a6a..268f318e4b 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -425,7 +425,8 @@ CacheAllocator::allocateInternalTier(TierId tid, uint32_t size, uint32_t creationTime, uint32_t expiryTime, - bool fromBgThread) { + bool fromBgThread, + bool evict) { util::LatencyTracker tracker{stats().allocateLatency_}; SCOPE_FAIL { stats_.invalidAllocs.inc(); }; @@ -446,7 +447,9 @@ CacheAllocator::allocateInternalTier(TierId tid, backgroundEvictor_[backgroundWorkerId(tid, pid, cid, backgroundEvictor_.size())]->wakeUp(); } - if (memory == nullptr) { + if (memory == nullptr && !evict) { + return {}; + } else if (memory == nullptr) { memory = findEviction(tid, pid, cid); } @@ -496,7 +499,8 @@ CacheAllocator::allocateInternal(PoolId pid, bool fromBgThread) { auto tid = 0; /* TODO: consult admission policy */ for(TierId tid = 0; tid < getNumTiers(); ++tid) { - auto handle = allocateInternalTier(tid, pid, key, size, creationTime, expiryTime, fromBgThread); + bool evict = !config_.insertToFirstFreeTier || tid == getNumTiers() - 1; + auto handle = allocateInternalTier(tid, pid, key, size, creationTime, expiryTime, fromBgThread, evict); if (handle) return handle; } return {}; @@ -1813,13 +1817,17 @@ 
CacheAllocator::tryEvictToNextMemoryTier( TierId nextTier = tid; // TODO - calculate this based on some admission policy while (++nextTier < getNumTiers()) { // try to evict down to the next memory tiers + // always evict item from the nextTier to make room for new item + bool evict = true; + // allocateInternal might trigger another eviction auto newItemHdl = allocateInternalTier(nextTier, pid, item.getKey(), item.getSize(), item.getCreationTime(), item.getExpiryTime(), - fromBgThread); + fromBgThread, + evict); if (newItemHdl) { XDCHECK_EQ(newItemHdl->getSize(), item.getSize()); @@ -1855,13 +1863,17 @@ CacheAllocator::tryPromoteToNextMemoryTier( auto toPromoteTier = nextTier - 1; --nextTier; + // always evict item from the toPromoteTier to make room for new item + bool evict = true; + // allocateInternal might trigger another eviction auto newItemHdl = allocateInternalTier(toPromoteTier, pid, item.getKey(), item.getSize(), item.getCreationTime(), item.getExpiryTime(), - fromBgThread); + fromBgThread, + evict); if (newItemHdl) { XDCHECK_EQ(newItemHdl->getSize(), item.getSize()); @@ -3228,6 +3240,8 @@ CacheAllocator::allocateNewItemForOldItem(const Item& oldItem) { const auto allocInfo = allocator_[getTierId(oldItem)]->getAllocInfo(static_cast<const void*>(&oldItem)); + + bool evict = !config_.insertToFirstFreeTier || getTierId(oldItem) == getNumTiers() - 1; // Set up the destination for the move. Since oldItem would have the moving // bit set, it won't be picked for eviction. 
@@ -3237,7 +3251,8 @@ CacheAllocator::allocateNewItemForOldItem(const Item& oldItem) { oldItem.getSize(), oldItem.getCreationTime(), oldItem.getExpiryTime(), - false); + false, + evict); if (!newItemHdl) { return {}; } diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 60f40bcf07..28768ce563 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1520,13 +1520,19 @@ class CacheAllocator : public CacheBase { // For description see allocateInternal. // // @param tid id a memory tier + // @param fromBgThread whether this function was called from a bg + // thread - this is used to decide whether bg thread should + // be woken in case there is no free memory + // @param evict whether to evict an item from tier tid in case there + // is not enough memory WriteHandle allocateInternalTier(TierId tid, PoolId id, Key key, uint32_t size, uint32_t creationTime, uint32_t expiryTime, - bool fromBgThread); + bool fromBgThread, + bool evict); // Allocate a chained item // diff --git a/cachelib/allocator/CacheAllocatorConfig.h b/cachelib/allocator/CacheAllocatorConfig.h index a089e754c0..e1a2eefe11 100644 --- a/cachelib/allocator/CacheAllocatorConfig.h +++ b/cachelib/allocator/CacheAllocatorConfig.h @@ -309,6 +309,9 @@ class CacheAllocatorConfig { // Library team if you find yourself customizing this. CacheAllocatorConfig& setThrottlerConfig(util::Throttler::Config config); + // Insert items to first free memory tier + CacheAllocatorConfig& enableInsertToFirstFreeTier(); + // Passes in a callback to initialize an event tracker when the allocator // starts CacheAllocatorConfig& setEventTracker(EventTrackerSharedPtr&&); @@ -522,6 +525,11 @@ class CacheAllocatorConfig { // ABOVE are the config for various cache workers // + // if turned off, always insert new elements to topmost memory tier. 
+ // if turned on, insert new elements to first free memory tier or evict memory + // from the bottom one if memory cache is full + bool insertToFirstFreeTier = false; + // the number of tries to search for an item to evict // 0 means it's infinite unsigned int evictionSearchTries{50}; @@ -657,6 +665,12 @@ class CacheAllocatorConfig { {MemoryTierCacheConfig::fromShm().setRatio(1)}}; }; +template <typename T> +CacheAllocatorConfig<T>& CacheAllocatorConfig<T>::enableInsertToFirstFreeTier() { + insertToFirstFreeTier = true; + return *this; +} + template <typename T> CacheAllocatorConfig<T>& CacheAllocatorConfig<T>::setCacheName( const std::string& _cacheName) { @@ -1234,6 +1248,7 @@ std::map CacheAllocatorConfig::serialize() const { configMap["nvmAdmissionMinTTL"] = std::to_string(nvmAdmissionMinTTL); configMap["delayCacheWorkersStart"] = delayCacheWorkersStart ? "true" : "false"; + configMap["insertToFirstFreeTier"] = insertToFirstFreeTier ? "true" : "false"; mergeWithPrefix(configMap, throttleConfig.serialize(), "throttleConfig"); mergeWithPrefix(configMap, chainedItemAccessConfig.serialize(), diff --git a/cachelib/cachebench/cache/Cache-inl.h b/cachelib/cachebench/cache/Cache-inl.h index 7fd2e60641..0eb40aa53a 100644 --- a/cachelib/cachebench/cache/Cache-inl.h +++ b/cachelib/cachebench/cache/Cache-inl.h @@ -104,6 +104,8 @@ Cache::Cache(const CacheConfig& config, allocatorConfig_.configureMemoryTiers(config_.memoryTierConfigs); } + allocatorConfig_.insertToFirstFreeTier = config_.insertToFirstFreeTier; + auto cleanupGuard = folly::makeGuard([&] { if (!nvmCacheFilePath_.empty()) { util::removePath(nvmCacheFilePath_); diff --git a/cachelib/cachebench/util/CacheConfig.cpp b/cachelib/cachebench/util/CacheConfig.cpp index 5072021f9e..af5d7b4f64 100644 --- a/cachelib/cachebench/util/CacheConfig.cpp +++ b/cachelib/cachebench/util/CacheConfig.cpp @@ -49,6 +49,8 @@ CacheConfig::CacheConfig(const folly::dynamic& configJson) { JSONSetVal(configJson, tryLockUpdate); JSONSetVal(configJson, lruIpSpec); 
JSONSetVal(configJson, useCombinedLockForIterators); + + JSONSetVal(configJson, insertToFirstFreeTier); JSONSetVal(configJson, lru2qHotPct); JSONSetVal(configJson, lru2qColdPct); diff --git a/cachelib/cachebench/util/CacheConfig.h b/cachelib/cachebench/util/CacheConfig.h index 6481aa67c3..ec120c900a 100644 --- a/cachelib/cachebench/util/CacheConfig.h +++ b/cachelib/cachebench/util/CacheConfig.h @@ -97,6 +97,8 @@ struct CacheConfig : public JSONConfig { bool lruUpdateOnRead{true}; bool tryLockUpdate{false}; bool useCombinedLockForIterators{true}; + + bool insertToFirstFreeTier{false}; // LRU param uint64_t lruIpSpec{0};