From 87fcff45c36ce8dfbc86ab4209a3917b4ff15338 Mon Sep 17 00:00:00 2001
From: Igor Chorazewicz
Date: Fri, 24 Mar 2023 12:39:26 -0700
Subject: [PATCH] Remove parameter for markMoving()

---
 cachelib/allocator/CacheAllocator-inl.h       | 57 ++++++-------------
 cachelib/allocator/CacheAllocator.h           |  4 +-
 cachelib/allocator/CacheItem-inl.h            |  4 +-
 cachelib/allocator/CacheItem.h                |  2 +-
 cachelib/allocator/Refcount.h                 | 12 +++-
 .../test_configs/small_moving_bg.json         | 40 +++++++++++++
 run_tests.sh                                  |  1 +
 7 files changed, 72 insertions(+), 48 deletions(-)
 create mode 100644 cachelib/cachebench/test_configs/small_moving_bg.json

diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h
index 954d533daa..56d84e0a47 100644
--- a/cachelib/allocator/CacheAllocator-inl.h
+++ b/cachelib/allocator/CacheAllocator-inl.h
@@ -1303,19 +1303,6 @@ size_t CacheAllocator<CacheTrait>::wakeUpWaitersLocked(folly::StringPiece key,
 template <typename CacheTrait>
 bool CacheAllocator<CacheTrait>::moveRegularItemWithSync(
     Item& oldItem, WriteHandle& newItemHdl) {
-  //on function exit - the new item handle is no longer moving
-  //and other threads may access it - but in case where
-  //we failed to replace in access container we can give the
-  //new item back to the allocator
-  auto guard = folly::makeGuard([&]() {
-    auto ref = newItemHdl->unmarkMoving();
-    if (UNLIKELY(ref == 0)) {
-      const auto res =
-          releaseBackToAllocator(*newItemHdl, RemoveContext::kNormal, false);
-      XDCHECK(res == ReleaseRes::kReleased);
-    }
-  });
-
   XDCHECK(oldItem.isMoving());
   XDCHECK(!oldItem.isExpired());
   // TODO: should we introduce new latency tracker. E.g. evictRegularLatency_
@@ -1330,20 +1317,26 @@ bool CacheAllocator<CacheTrait>::moveRegularItemWithSync(
     newItemHdl->markNvmClean();
   }
 
-  // mark new item as moving to block readers until the data is copied
-  // (moveCb is called). Mark item in MMContainer temporarily (TODO: should
-  // we remove markMoving requirement for the item to be linked?)
-  newItemHdl->markInMMContainer();
-  auto marked = newItemHdl->markMoving(false /* there is already a handle */);
-  newItemHdl->unmarkInMMContainer();
-  XDCHECK(marked);
-
   auto predicate = [&](const Item& item){
     // we rely on moving flag being set (it should block all readers)
     XDCHECK(item.getRefCount() == 0);
     return true;
   };
 
+  if (config_.moveCb) {
+    // Execute the move callback. We cannot make any guarantees about the
+    // consistency of the old item beyond this point, because the callback can
+    // do more than a simple memcpy() e.g. update external references. If there
+    // are any remaining handles to the old item, it is the caller's
+    // responsibility to invalidate them. The move can only fail after this
+    // statement if the old item has been removed or replaced, in which case it
+    // should be fine for it to be left in an inconsistent state.
+    config_.moveCb(oldItem, *newItemHdl, nullptr);
+  } else {
+    std::memcpy(newItemHdl->getMemory(), oldItem.getMemory(),
+                oldItem.getSize());
+  }
+
   auto replaced = accessContainer_->replaceIf(oldItem, *newItemHdl,
                                               predicate);
   // another thread may have called insertOrReplace which could have
@@ -1363,26 +1356,13 @@
   // 3. replaced handle is returned and eventually drops
   //    ref to 0 and the item is recycled back to allocator.
 
-  if (config_.moveCb) {
-    // Execute the move callback. We cannot make any guarantees about the
-    // consistency of the old item beyond this point, because the callback can
-    // do more than a simple memcpy() e.g. update external references. If there
-    // are any remaining handles to the old item, it is the caller's
-    // responsibility to invalidate them. The move can only fail after this
-    // statement if the old item has been removed or replaced, in which case it
-    // should be fine for it to be left in an inconsistent state.
-    config_.moveCb(oldItem, *newItemHdl, nullptr);
-  } else {
-    std::memcpy(newItemHdl->getMemory(), oldItem.getMemory(),
-                oldItem.getSize());
-  }
-
   // Adding the item to mmContainer has to succeed since no one can remove the item
   auto& newContainer = getMMContainer(*newItemHdl);
   auto mmContainerAdded = newContainer.add(*newItemHdl);
   XDCHECK(mmContainerAdded);
 
   // no one can add or remove chained items at this point
+  // TODO: is this a race? The new item is already accessible but not yet marked with hasChainedItem()
   if (oldItem.hasChainedItem()) {
     // safe to acquire handle for a moving Item
     auto incRes = incRef(oldItem, false);
@@ -1612,7 +1592,7 @@ CacheAllocator<CacheTrait>::findEviction(TierId tid, PoolId pid, ClassId cid) {
     if (lastTier && shouldWriteToNvmCache(*candidate_) && !token.isValid()) {
       stats_.evictFailConcurrentFill.inc();
     } else if ( (lastTier && candidate_->markForEviction()) ||
-                (!lastTier && candidate_->markMoving(true)) ) {
+                (!lastTier && candidate_->markMoving()) ) {
       XDCHECK(candidate_->isMoving() || candidate_->isMarkedForEviction());
       // markForEviction to make sure no other thead is evicting the item
       // nor holding a handle to that item if this is last tier
@@ -3499,10 +3479,7 @@ bool CacheAllocator<CacheTrait>::markMovingForSlabRelease(
     // Since this callback is executed, the item is not yet freed
     itemFreed = false;
     Item* item = static_cast<Item*>(memory);
-    // TODO: for chained items, moving bit is only used to avoid
-    // freeing the item prematurely
-    auto failIfRefNotZero = numTiers > 1 && !item->isChainedItem();
-    if (item->markMoving(failIfRefNotZero)) {
+    if (item->markMoving()) {
      markedMoving = true;
    }
  };
diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h
index d2956795a9..c26987c88b 100644
--- a/cachelib/allocator/CacheAllocator.h
+++ b/cachelib/allocator/CacheAllocator.h
@@ -1979,7 +1979,7 @@ auto& mmContainer = getMMContainer(tid, pid, cid);
         throw std::runtime_error("Not supported for chained items");
       }
 
-      if (candidate->markMoving(true)) {
+      if (candidate->markMoving()) {
         mmContainer.remove(itr);
         candidates.push_back(candidate);
       } else {
@@ -2052,7 +2052,7 @@ auto& mmContainer = getMMContainer(tid, pid, cid);
 
       // TODO: only allow it for read-only items?
       // or implement mvcc
-      if (candidate->markMoving(true)) {
+      if (candidate->markMoving()) {
        // promotions should rarely fail since we already marked moving
        mmContainer.remove(itr);
        candidates.push_back(candidate);
diff --git a/cachelib/allocator/CacheItem-inl.h b/cachelib/allocator/CacheItem-inl.h
index b33c1ea28a..0028e2776a 100644
--- a/cachelib/allocator/CacheItem-inl.h
+++ b/cachelib/allocator/CacheItem-inl.h
@@ -238,8 +238,8 @@ bool CacheItem<CacheTrait>::markForEvictionWhenMoving() {
 }
 
 template <typename CacheTrait>
-bool CacheItem<CacheTrait>::markMoving(bool failIfRefNotZero) {
-  return ref_.markMoving(failIfRefNotZero);
+bool CacheItem<CacheTrait>::markMoving() {
+  return ref_.markMoving();
 }
 
 template <typename CacheTrait>
diff --git a/cachelib/allocator/CacheItem.h b/cachelib/allocator/CacheItem.h
index 6728b654eb..6713cd63f4 100644
--- a/cachelib/allocator/CacheItem.h
+++ b/cachelib/allocator/CacheItem.h
@@ -381,7 +381,7 @@ class CACHELIB_PACKED_ATTR CacheItem {
    * Unmarking moving will also return the refcount at the moment of
    * unmarking.
   */
-  bool markMoving(bool failIfRefNotZero);
+  bool markMoving();
   RefcountWithFlags::Value unmarkMoving() noexcept;
   bool isMoving() const noexcept;
   bool isOnlyMoving() const noexcept;
diff --git a/cachelib/allocator/Refcount.h b/cachelib/allocator/Refcount.h
index 8251ef15ba..30f90cdae4 100644
--- a/cachelib/allocator/Refcount.h
+++ b/cachelib/allocator/Refcount.h
@@ -320,12 +320,18 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
    *
    * Unmarking moving does not depend on `isInMMContainer`
    */
-  bool markMoving(bool failIfRefNotZero) {
-    auto predicate = [failIfRefNotZero](const Value curValue) {
+  bool markMoving() {
+    auto predicate = [](const Value curValue) {
       Value conditionBitMask = getAdminRef<kLinked>();
       const bool flagSet = curValue & conditionBitMask;
       const bool alreadyExclusive = curValue & getAdminRef<kExclusive>();
-      if (failIfRefNotZero && (curValue & kAccessRefMask) != 0) {
+      const bool isChained = curValue & getFlag<kIsChainedItem>();
+
+      // a chained item can have a ref count of 1; this just means it's linked in the chain
+      if (isChained && (curValue & kAccessRefMask) > 1) {
+        return false;
+      }
+      if (!isChained && (curValue & kAccessRefMask) != 0) {
         return false;
       }
       if (!flagSet || alreadyExclusive) {
diff --git a/cachelib/cachebench/test_configs/small_moving_bg.json b/cachelib/cachebench/test_configs/small_moving_bg.json
new file mode 100644
index 0000000000..5807b86666
--- /dev/null
+++ b/cachelib/cachebench/test_configs/small_moving_bg.json
@@ -0,0 +1,40 @@
+// @nolint like default.json, but moves items during slab release instead of evicting them.
+{
+  "cache_config" : {
+    "cacheSizeMB" : 2248,
+    "cacheDir": "/tmp/mem-tier5",
+    "memoryTiers" : [
+      {
+        "ratio": 1,
+        "memBindNodes": 0
+      }, {
+        "ratio": 1,
+        "memBindNodes": 0
+      }
+    ],
+    "poolRebalanceIntervalSec" : 1,
+    "moveOnSlabRelease" : true,
+    "rebalanceMinSlabs" : 2,
+    "evictorThreads": 2,
+    "promoterThreads": 2
+  },
+  "test_config" :
+    {
+      "preallocateCache" : true,
+      "numOps" : 40000000,
+      "numThreads" : 32,
+      "numKeys" : 250000,
+      "generator": "online",
+
+      "keySizeRange" : [1, 8, 32, 64, 128, 256, 512],
+      "keySizeRangeProbability" : [0.1, 0.1, 0.2, 0.2, 0.3, 0.1],
+
+      "valSizeRange" : [1, 128, 512, 1024, 4096, 10240, 20480, 40960, 60000],
+      "valSizeRangeProbability" : [0.1, 0.1, 0.1, 0.2, 0.2, 0.1, 0.1, 0.1],
+
+      "getRatio" : 0.70,
+      "setRatio" : 0.30
+    }
+
+  }
+
\ No newline at end of file
diff --git a/run_tests.sh b/run_tests.sh
index e575dbc62a..1d68eb41b7 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -13,3 +13,4 @@ fi
 
 ../bin/cachebench --json_test_config ../test_configs/consistency/navy.json
 ../bin/cachebench --json_test_config ../test_configs/consistency/navy-multi-tier.json
+../opt/bin/cachebench --json_test_config /opt/test_configs/small_moving_bg.json
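
Note (not part of the patch): below is a minimal sketch of the predicate behind the new parameterless markMoving(), under simplified assumptions. An item can be marked moving only if it is linked and not already exclusive and has no outstanding handles, except that a chained item may carry the single reference it gets from being linked into its chain. The bit layout, the constants kAccessRefMask, kLinked, kExclusive and kIsChainedItem, and the CAS loop are simplified stand-ins for RefcountWithFlags in cachelib/allocator/Refcount.h rather than the actual implementation.

#include <atomic>
#include <cassert>
#include <cstdint>

using Value = std::uint32_t;

// Simplified bit layout: low 24 bits count outstanding handles, the rest are
// admin/flag bits. Names mirror the real header, but the values are made up here.
constexpr Value kAccessRefMask = (1u << 24) - 1; // outstanding handle count
constexpr Value kLinked        = 1u << 24;       // linked in the MM container
constexpr Value kExclusive     = 1u << 25;       // moving / marked for eviction
constexpr Value kIsChainedItem = 1u << 26;       // item is a chained item

struct RefcountSketch {
  std::atomic<Value> raw{0};

  // Returns true iff this call set the exclusive (moving) bit.
  bool markMoving() {
    Value cur = raw.load(std::memory_order_relaxed);
    do {
      const Value refs = cur & kAccessRefMask;
      const bool isChained = cur & kIsChainedItem;
      // A chained item holds one reference just for being linked into its
      // chain, so it may still be moved with a refcount of 1; any other item
      // must have no outstanding handles at all.
      if (isChained ? refs > 1 : refs != 0) {
        return false;
      }
      // The item must be linked and must not already be exclusive.
      if (!(cur & kLinked) || (cur & kExclusive)) {
        return false;
      }
      // Try to set the exclusive bit; on failure, cur is reloaded and the
      // predicate is re-evaluated against the new value.
    } while (!raw.compare_exchange_weak(cur, cur | kExclusive,
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed));
    return true;
  }
};

int main() {
  RefcountSketch item;
  item.raw = kLinked;                         // linked, zero handles
  assert(item.markMoving());                  // succeeds
  assert(!item.markMoving());                 // already exclusive -> fails

  RefcountSketch chained;
  chained.raw = kLinked | kIsChainedItem | 1; // the chain link holds one ref
  assert(chained.markMoving());               // still allowed for a chained item
  return 0;
}

Seen this way, the failIfRefNotZero parameter (and the per-call-site computation removed from markMovingForSlabRelease()) becomes unnecessary because the chained-item special case now lives inside the predicate itself.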