From ba35b3e190c7a1ebdf94d879d341fc9c9281263a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Igor=20Chor=C4=85=C5=BCewicz?= Date: Tue, 12 Apr 2022 07:42:13 -0400 Subject: [PATCH 1/9] Shorten critical section in findEviction Remove the item from mmContainer and drop the lock before attempting eviction. --- cachelib/allocator/CacheAllocator-inl.h | 58 ++++++------------------- cachelib/allocator/CacheAllocator.h | 14 +++--- 2 files changed, 20 insertions(+), 52 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index e725a70089..c9f4bb7f82 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1228,13 +1228,15 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { (*stats_.evictionAttempts)[pid][cid].inc(); Item* candidate = itr.get(); + mmContainer.remove(itr); + itr.destroy(); + // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. - auto toReleaseHandle = - itr->isChainedItem() - ? advanceIteratorAndTryEvictChainedItem(itr) - : advanceIteratorAndTryEvictRegularItem(mmContainer, itr); + auto toReleaseHandle = candidate->isChainedItem() + ? tryEvictChainedItem(*candidate) + : tryEvictRegularItem(mmContainer, *candidate); if (toReleaseHandle) { if (toReleaseHandle->hasChainedItem()) { @@ -1249,9 +1251,6 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { AllocatorApiResult::EVICTED, toReleaseHandle->getSize(), toReleaseHandle->getConfiguredTTL().count()); } - // Invalidate iterator since later on we may use this mmContainer - // again, which cannot be done unless we drop this iterator - itr.destroy(); // we must be the last handle and for chained items, this will be // the parent. 
@@ -1276,11 +1275,9 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { } } - // If we destroyed the itr to possibly evict and failed, we restart - // from the beginning again - if (!itr) { - itr.resetToBegin(); - } + // Insert item back to the mmContainer if eviction failed. + mmContainer.add(*candidate); + itr.resetToBegin(); } return nullptr; } @@ -1335,11 +1332,10 @@ bool CacheAllocator::shouldWriteToNvmCacheExclusive( template typename CacheAllocator::WriteHandle -CacheAllocator::advanceIteratorAndTryEvictRegularItem( - MMContainer& mmContainer, EvictionIterator& itr) { +CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, + Item& item) { // we should flush this to nvmcache if it is not already present in nvmcache // and the item is not expired. - Item& item = *itr; const bool evictToNvmCache = shouldWriteToNvmCache(item); auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) @@ -1347,7 +1343,6 @@ CacheAllocator::advanceIteratorAndTryEvictRegularItem( // record the in-flight eviciton. If not, we move on to next item to avoid // stalling eviction. if (evictToNvmCache && !token.isValid()) { - ++itr; stats_.evictFailConcurrentFill.inc(); return WriteHandle{}; } @@ -1359,12 +1354,10 @@ CacheAllocator::advanceIteratorAndTryEvictRegularItem( auto evictHandle = accessContainer_->removeIf(item, &itemEvictionPredicate); if (!evictHandle) { - ++itr; stats_.evictFailAC.inc(); return evictHandle; } - mmContainer.remove(itr); XDCHECK_EQ(reinterpret_cast(evictHandle.get()), reinterpret_cast(&item)); XDCHECK(!evictHandle->isInMMContainer()); @@ -1379,15 +1372,6 @@ CacheAllocator::advanceIteratorAndTryEvictRegularItem( return WriteHandle{}; } - // Invalidate iterator since later on if we are not evicting this - // item, we may need to rely on the handle we created above to ensure - // proper cleanup if the item's raw refcount has dropped to 0. 
- // And since this item may be a parent item that has some child items - // in this very same mmContainer, we need to make sure we drop this - // exclusive iterator so we can gain access to it when we're cleaning - // up the child items - itr.destroy(); - // Ensure that there are no accessors after removing from the access // container XDCHECK(evictHandle->getRefCount() == 1); @@ -1401,12 +1385,10 @@ CacheAllocator::advanceIteratorAndTryEvictRegularItem( template typename CacheAllocator::WriteHandle -CacheAllocator::advanceIteratorAndTryEvictChainedItem( - EvictionIterator& itr) { - XDCHECK(itr->isChainedItem()); +CacheAllocator::tryEvictChainedItem(Item& item) { + XDCHECK(item.isChainedItem()); - ChainedItem* candidate = &itr->asChainedItem(); - ++itr; + ChainedItem* candidate = &item.asChainedItem(); // The parent could change at any point through transferChain. However, if // that happens, we would realize that the releaseBackToAllocator return @@ -1433,23 +1415,11 @@ CacheAllocator::advanceIteratorAndTryEvictChainedItem( return parentHandle; } - // Invalidate iterator since later on we may use the mmContainer - // associated with this iterator which cannot be done unless we - // drop this iterator - // - // This must be done once we know the parent is not nullptr. - // Since we can very well be the last holder of this parent item, - // which may have a chained item that is linked in this MM container. - itr.destroy(); - // Ensure we have the correct parent and we're the only user of the // parent, then free it from access container. 
Otherwise, we abort XDCHECK_EQ(reinterpret_cast(&parent), reinterpret_cast(parentHandle.get())); XDCHECK_EQ(1u, parent.getRefCount()); - - removeFromMMContainer(*parentHandle); - XDCHECK(!parent.isInMMContainer()); XDCHECK(!parent.isAccessible()); diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 676abd7198..f8edfee4fc 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1661,24 +1661,22 @@ class CacheAllocator : public CacheBase { using EvictionIterator = typename MMContainer::Iterator; - // Advance the current iterator and try to evict a regular item + // Try to evict a regular item. // // @param mmContainer the container to look for evictions. - // @param itr iterator holding the item + // @param item item to evict // // @return valid handle to regular item on success. This will be the last // handle to the item. On failure an empty handle. - WriteHandle advanceIteratorAndTryEvictRegularItem(MMContainer& mmContainer, - EvictionIterator& itr); + WriteHandle tryEvictRegularItem(MMContainer& mmContainer, Item& item); - // Advance the current iterator and try to evict a chained item - // Iterator may also be reset during the course of this function + // Try to evict a chained item. // - // @param itr iterator holding the item + // @param item item to evict // // @return valid handle to the parent item on success. 
This will be the last // handle to the item - WriteHandle advanceIteratorAndTryEvictChainedItem(EvictionIterator& itr); + WriteHandle tryEvictChainedItem(Item& item); // Deserializer CacheAllocatorMetadata and verify the version // From 544ef866f5e121fa9706570df1d7313141f45c28 Mon Sep 17 00:00:00 2001 From: igchor Date: Wed, 4 May 2022 15:27:44 -0400 Subject: [PATCH 2/9] Increment refcount under lock --- cachelib/allocator/CacheAllocator-inl.h | 38 +++++++++++++++++++------ cachelib/allocator/CacheAllocator.h | 4 +-- 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index c9f4bb7f82..dc51763a3a 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1228,15 +1228,23 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { (*stats_.evictionAttempts)[pid][cid].inc(); Item* candidate = itr.get(); - mmContainer.remove(itr); + + // make sure no other thead is evicting the item + if (candidate->getRefCount() != 0) { + ++itr; + continue; + } + + incRef(*candidate); itr.destroy(); // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. auto toReleaseHandle = candidate->isChainedItem() - ? tryEvictChainedItem(*candidate) + ? tryEvictChainedItem(mmContainer, *candidate) : tryEvictRegularItem(mmContainer, *candidate); + // XXX: fix chained item if (toReleaseHandle) { if (toReleaseHandle->hasChainedItem()) { @@ -1255,7 +1263,7 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // we must be the last handle and for chained items, this will be // the parent. 
XDCHECK(toReleaseHandle.get() == candidate || candidate->isChainedItem()); - XDCHECK_EQ(1u, toReleaseHandle->getRefCount()); + XDCHECK_EQ(2u, toReleaseHandle->getRefCount()); // We manually release the item here because we don't want to // invoke the Item Handle's destructor which will be decrementing @@ -1263,6 +1271,7 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { auto& itemToRelease = *toReleaseHandle.release(); // Decrementing the refcount because we want to recycle the item + decRef(itemToRelease); const auto ref = decRef(itemToRelease); XDCHECK_EQ(0u, ref); @@ -1275,9 +1284,13 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { } } - // Insert item back to the mmContainer if eviction failed. - mmContainer.add(*candidate); - itr.resetToBegin(); + // If we destroyed the itr to possibly evict and failed, we restart + // from the beginning again + if (!itr) { + itr.resetToBegin(); + for (int i = 0; i < searchTries; i++) + ++itr; + } } return nullptr; } @@ -1344,6 +1357,7 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, // stalling eviction. if (evictToNvmCache && !token.isValid()) { stats_.evictFailConcurrentFill.inc(); + decRef(item); return WriteHandle{}; } @@ -1355,9 +1369,12 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, if (!evictHandle) { stats_.evictFailAC.inc(); + decRef(item); return evictHandle; } + auto removed = mmContainer.remove(item); + XDCHECK(removed); XDCHECK_EQ(reinterpret_cast(evictHandle.get()), reinterpret_cast(&item)); XDCHECK(!evictHandle->isInMMContainer()); @@ -1369,12 +1386,13 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, // is set. Iterator was already advance by the remove call above. 
if (evictHandle->isMoving()) { stats_.evictFailMove.inc(); + decRef(item); return WriteHandle{}; } // Ensure that there are no accessors after removing from the access // container - XDCHECK(evictHandle->getRefCount() == 1); + XDCHECK(evictHandle->getRefCount() == 2); if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) { XDCHECK(token.isValid()); @@ -1385,7 +1403,7 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, template typename CacheAllocator::WriteHandle -CacheAllocator::tryEvictChainedItem(Item& item) { +CacheAllocator::tryEvictChainedItem(MMContainer& mmContainer, Item& item) { XDCHECK(item.isChainedItem()); ChainedItem* candidate = &item.asChainedItem(); @@ -1417,9 +1435,11 @@ CacheAllocator::tryEvictChainedItem(Item& item) { // Ensure we have the correct parent and we're the only user of the // parent, then free it from access container. Otherwise, we abort + auto removed = mmContainer.remove(item); + XDCHECK(removed); XDCHECK_EQ(reinterpret_cast(&parent), reinterpret_cast(parentHandle.get())); - XDCHECK_EQ(1u, parent.getRefCount()); + XDCHECK_EQ(2u, parent.getRefCount()); // XXX? XDCHECK(!parent.isInMMContainer()); XDCHECK(!parent.isAccessible()); diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index f8edfee4fc..c64bf4917f 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1676,7 +1676,7 @@ class CacheAllocator : public CacheBase { // // @return valid handle to the parent item on success. 
This will be the last // handle to the item - WriteHandle tryEvictChainedItem(Item& item); + WriteHandle tryEvictChainedItem(MMContainer& mmContainer, Item& item); // Deserializer CacheAllocatorMetadata and verify the version // @@ -1929,7 +1929,7 @@ class CacheAllocator : public CacheBase { } static bool itemEvictionPredicate(const Item& item) { - return item.getRefCount() == 0 && !item.isMoving(); + return item.getRefCount() == 1 && !item.isMoving(); } static bool itemExpiryPredicate(const Item& item) { From 32979fbd55b668b01c9ac4e83f7d970dba054431 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Thu, 2 Jun 2022 04:40:48 -0400 Subject: [PATCH 3/9] Increment item refCount under AC lock to avoid races with remove/replace with predicate. Also, refact tryEvictRegularItem and tryEvictChainedItem into a single function. --- cachelib/allocator/CacheAllocator-inl.h | 156 +++++++--------------- cachelib/allocator/CacheAllocator.h | 18 +-- cachelib/allocator/ChainedHashTable-inl.h | 15 +++ cachelib/allocator/ChainedHashTable.h | 11 ++ 4 files changed, 80 insertions(+), 120 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index dc51763a3a..bfc605afce 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1230,24 +1230,29 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { Item* candidate = itr.get(); // make sure no other thead is evicting the item - if (candidate->getRefCount() != 0) { + if (candidate->getRefCount() != 0 || candidate->isMoving()) { ++itr; continue; } - - incRef(*candidate); + + if (candidate->isChainedItem()) { + candidate = &candidate->asChainedItem().getParentItem(compressor_); + } + + auto toReleaseHandle = accessContainer_->find(*candidate); + if (!toReleaseHandle || !toReleaseHandle.isReady()) { + ++itr; + continue; + } + itr.destroy(); // for chained items, the ownership of the parent can change. 
We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. - auto toReleaseHandle = candidate->isChainedItem() - ? tryEvictChainedItem(mmContainer, *candidate) - : tryEvictRegularItem(mmContainer, *candidate); - // XXX: fix chained item - - if (toReleaseHandle) { - if (toReleaseHandle->hasChainedItem()) { + bool evicted = tryEvictItem(mmContainer, std::move(toReleaseHandle)); + if (evicted) { + if (candidate->hasChainedItem()) { (*stats_.chainedItemEvictions)[pid][cid].inc(); } else { (*stats_.regularItemEvictions)[pid][cid].inc(); @@ -1260,37 +1265,20 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { toReleaseHandle->getConfiguredTTL().count()); } - // we must be the last handle and for chained items, this will be - // the parent. - XDCHECK(toReleaseHandle.get() == candidate || candidate->isChainedItem()); - XDCHECK_EQ(2u, toReleaseHandle->getRefCount()); - - // We manually release the item here because we don't want to - // invoke the Item Handle's destructor which will be decrementing - // an already zero refcount, which will throw exception - auto& itemToRelease = *toReleaseHandle.release(); - // Decrementing the refcount because we want to recycle the item - decRef(itemToRelease); - const auto ref = decRef(itemToRelease); + const auto ref = decRef(*candidate); XDCHECK_EQ(0u, ref); // check if by releasing the item we intend to, we actually // recycle the candidate. 
if (ReleaseRes::kRecycled == - releaseBackToAllocator(itemToRelease, RemoveContext::kEviction, + releaseBackToAllocator(*candidate, RemoveContext::kEviction, /* isNascent */ false, candidate)) { return candidate; } } - // If we destroyed the itr to possibly evict and failed, we restart - // from the beginning again - if (!itr) { - itr.resetToBegin(); - for (int i = 0; i < searchTries; i++) - ++itr; - } + itr.resetToBegin(); } return nullptr; } @@ -1344,9 +1332,10 @@ bool CacheAllocator::shouldWriteToNvmCacheExclusive( } template -typename CacheAllocator::WriteHandle -CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, - Item& item) { +bool CacheAllocator::tryEvictItem(MMContainer& mmContainer, + WriteHandle&& handle) { + auto& item = *handle; + // we should flush this to nvmcache if it is not already present in nvmcache // and the item is not expired. const bool evictToNvmCache = shouldWriteToNvmCache(item); @@ -1356,21 +1345,27 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, // record the in-flight eviciton. If not, we move on to next item to avoid // stalling eviction. if (evictToNvmCache && !token.isValid()) { - stats_.evictFailConcurrentFill.inc(); - decRef(item); - return WriteHandle{}; + if (item.isChainedItem()) + stats_.evictFailConcurrentFill.inc(); + else + stats_.evictFailConcurrentFill.inc(); + handle.reset(); + return false; } - // If there are other accessors, we should abort. Acquire a handle here since - // if we remove the item from both access containers and mm containers - // below, we will need a handle to ensure proper cleanup in case we end up - // not evicting this item + // If there are other accessors, we should abort. We should be the only + // handle owner. auto evictHandle = accessContainer_->removeIf(item, &itemEvictionPredicate); + /* We got new handle so it's safe to get rid of the old one. 
*/ + handle.reset(); + if (!evictHandle) { - stats_.evictFailAC.inc(); - decRef(item); - return evictHandle; + if (item.isChainedItem()) + stats_.evictFailParentAC.inc(); + else + stats_.evictFailAC.inc(); + return false; } auto removed = mmContainer.remove(item); @@ -1383,81 +1378,28 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, // If the item is now marked as moving, that means its corresponding slab is // being released right now. So, we look for the next item that is eligible // for eviction. It is safe to destroy the handle here since the moving bit - // is set. Iterator was already advance by the remove call above. + // is set. if (evictHandle->isMoving()) { - stats_.evictFailMove.inc(); - decRef(item); - return WriteHandle{}; + if (item.isChainedItem()) + stats_.evictFailParentMove.inc(); + else + stats_.evictFailMove.inc(); + return false; } // Ensure that there are no accessors after removing from the access - // container - XDCHECK(evictHandle->getRefCount() == 2); + // container. + XDCHECK(evictHandle->getRefCount() == 1); if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) { XDCHECK(token.isValid()); nvmCache_->put(evictHandle, std::move(token)); } - return evictHandle; -} - -template -typename CacheAllocator::WriteHandle -CacheAllocator::tryEvictChainedItem(MMContainer& mmContainer, Item& item) { - XDCHECK(item.isChainedItem()); - - ChainedItem* candidate = &item.asChainedItem(); - - // The parent could change at any point through transferChain. However, if - // that happens, we would realize that the releaseBackToAllocator return - // kNotRecycled and we would try another chained item, leading to transient - // failure. - auto& parent = candidate->getParentItem(compressor_); - - const bool evictToNvmCache = shouldWriteToNvmCache(parent); - - auto token = evictToNvmCache ? nvmCache_->createPutToken(parent.getKey()) - : typename NvmCacheT::PutToken{}; - - // if token is invalid, return. iterator is already advanced. 
- if (evictToNvmCache && !token.isValid()) { - stats_.evictFailConcurrentFill.inc(); - return WriteHandle{}; - } - - // check if the parent exists in the hashtable and refcount is drained. - auto parentHandle = - accessContainer_->removeIf(parent, &itemEvictionPredicate); - if (!parentHandle) { - stats_.evictFailParentAC.inc(); - return parentHandle; - } - // Ensure we have the correct parent and we're the only user of the - // parent, then free it from access container. Otherwise, we abort - auto removed = mmContainer.remove(item); - XDCHECK(removed); - XDCHECK_EQ(reinterpret_cast(&parent), - reinterpret_cast(parentHandle.get())); - XDCHECK_EQ(2u, parent.getRefCount()); // XXX? - XDCHECK(!parent.isInMMContainer()); - XDCHECK(!parent.isAccessible()); - - // We need to make sure the parent is not marked as moving - // and we're the only holder of the parent item. Safe to destroy the handle - // here since moving bit is set. - if (parentHandle->isMoving()) { - stats_.evictFailParentMove.inc(); - return WriteHandle{}; - } - - if (evictToNvmCache && shouldWriteToNvmCacheExclusive(*parentHandle)) { - XDCHECK(token.isValid()); - XDCHECK(parentHandle->hasChainedItem()); - nvmCache_->put(parentHandle, std::move(token)); - } + /* Release the handle, item will be reused by the called. */ + evictHandle.release(); - return parentHandle; + return true; } template diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index c64bf4917f..1756c891e4 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1661,22 +1661,14 @@ class CacheAllocator : public CacheBase { using EvictionIterator = typename MMContainer::Iterator; - // Try to evict a regular item. + // Try to evict an item. // // @param mmContainer the container to look for evictions. - // @param item item to evict + // @param handle handle to item to evict. eviction will fail if + // this is not an owning handle. 
// - // @return valid handle to regular item on success. This will be the last - // handle to the item. On failure an empty handle. - WriteHandle tryEvictRegularItem(MMContainer& mmContainer, Item& item); - - // Try to evict a chained item. - // - // @param item item to evict - // - // @return valid handle to the parent item on success. This will be the last - // handle to the item - WriteHandle tryEvictChainedItem(MMContainer& mmContainer, Item& item); + // @return whether eviction succeeded + bool tryEvictItem(MMContainer& mmContainer, WriteHandle&& handle); // Deserializer CacheAllocatorMetadata and verify the version // diff --git a/cachelib/allocator/ChainedHashTable-inl.h b/cachelib/allocator/ChainedHashTable-inl.h index 2655a4b12d..54bff4fb0c 100644 --- a/cachelib/allocator/ChainedHashTable-inl.h +++ b/cachelib/allocator/ChainedHashTable-inl.h @@ -450,6 +450,21 @@ typename T::Handle ChainedHashTable::Container::removeIf( } } +template T::*HookPtr, + typename LockT> +typename T::Handle ChainedHashTable::Container::find( + T& node) const { + const auto bucket = ht_.getBucket(node.getKey()); + auto l = locks_.lockShared(bucket); + + if (!node.isAccessible()) { + return {}; + } + + return handleMaker_(&node); +} + template T::*HookPtr, typename LockT> diff --git a/cachelib/allocator/ChainedHashTable.h b/cachelib/allocator/ChainedHashTable.h index 8333b43116..19c2f9f9f6 100644 --- a/cachelib/allocator/ChainedHashTable.h +++ b/cachelib/allocator/ChainedHashTable.h @@ -496,6 +496,17 @@ class ChainedHashTable { // creating this item handle. Handle find(Key key) const; + // returns a handle to specified node. + // + // @param node requested node + // + // @return handle to the node if find was sucessfull. returns a + // null handle if the node was not in the container. + // + // @throw std::overflow_error is the maximum item refcount is execeeded by + // creating this item handle. 
+ Handle find(T& node) const; + // for saving the state of the hash table // // precondition: serialization must happen without any reader or writer From b3cebe32c302f2af4d30af8027096abdf47ae709 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Thu, 9 Jun 2022 06:01:11 +0000 Subject: [PATCH 4/9] Use moving bit for synchronization in findEviction moving bit is used to give exclusive right to evict the item to a particular thread. Originally, there was an assumption that whoever marked the item as moving will try to free it until it succeeds. Since we don't want to do that in findEviction (potentially can take a long time) we need to make sure that unmarking is safe. This patch checks the flags after unmarking (atomically) and if ref is zero it also recycles the item. This is needed as there might be some concurrent thread releasing the item (and decrementing ref count). If moving bit is set, that thread would not free the memory back to allocator, resulting in memory leak on unmarkMoving(). 
--- cachelib/allocator/CacheAllocator-inl.h | 139 ++++++++-------------- cachelib/allocator/CacheAllocator.h | 15 +-- cachelib/allocator/CacheItem-inl.h | 4 +- cachelib/allocator/CacheItem.h | 2 +- cachelib/allocator/ChainedHashTable-inl.h | 15 --- cachelib/allocator/ChainedHashTable.h | 11 -- cachelib/allocator/Refcount.h | 13 +- 7 files changed, 60 insertions(+), 139 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index bfc605afce..b86310daac 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1227,20 +1227,15 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { ++searchTries; (*stats_.evictionAttempts)[pid][cid].inc(); - Item* candidate = itr.get(); + Item* toRecycle = itr.get(); - // make sure no other thead is evicting the item - if (candidate->getRefCount() != 0 || candidate->isMoving()) { - ++itr; - continue; - } - - if (candidate->isChainedItem()) { - candidate = &candidate->asChainedItem().getParentItem(compressor_); - } + Item* candidate = + toRecycle->isChainedItem() + ? &toRecycle->asChainedItem().getParentItem(compressor_) + : toRecycle; - auto toReleaseHandle = accessContainer_->find(*candidate); - if (!toReleaseHandle || !toReleaseHandle.isReady()) { + // make sure no other thead is evicting the item + if (candidate->getRefCount() != 0 || !candidate->markMoving()) { ++itr; continue; } @@ -1250,8 +1245,11 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. 
- bool evicted = tryEvictItem(mmContainer, std::move(toReleaseHandle)); - if (evicted) { + auto toReleaseHandle = + evictNormalItem(*candidate, true /* skipIfTokenInvalid */); + auto ref = candidate->unmarkMoving(); + + if (toReleaseHandle || ref == 0u) { if (candidate->hasChainedItem()) { (*stats_.chainedItemEvictions)[pid][cid].inc(); } else { @@ -1264,17 +1262,43 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { AllocatorApiResult::EVICTED, toReleaseHandle->getSize(), toReleaseHandle->getConfiguredTTL().count()); } + } else { + if (candidate->hasChainedItem()) { + stats_.evictFailParentAC.inc(); + } else { + stats_.evictFailAC.inc(); + } + } + + if (toReleaseHandle) { + XDCHECK(toReleaseHandle.get() == candidate); + XDCHECK(toRecycle == candidate || toRecycle->isChainedItem()); + XDCHECK_EQ(1u, toReleaseHandle->getRefCount()); + + // We manually release the item here because we don't want to + // invoke the Item Handle's destructor which will be decrementing + // an already zero refcount, which will throw exception + auto& itemToRelease = *toReleaseHandle.release(); // Decrementing the refcount because we want to recycle the item - const auto ref = decRef(*candidate); + ref = decRef(itemToRelease); XDCHECK_EQ(0u, ref); // check if by releasing the item we intend to, we actually // recycle the candidate. + if (ReleaseRes::kRecycled == + releaseBackToAllocator(itemToRelease, RemoveContext::kEviction, + /* isNascent */ false, toRecycle)) { + return toRecycle; + } + } else if (ref == 0u) { + // it's safe to recycle the item here as there are no more + // references and the item could not been marked as moving + // by other thread since it's detached from MMContainer. 
if (ReleaseRes::kRecycled == releaseBackToAllocator(*candidate, RemoveContext::kEviction, - /* isNascent */ false, candidate)) { - return candidate; + /* isNascent */ false, toRecycle)) { + return toRecycle; } } @@ -1331,77 +1355,6 @@ bool CacheAllocator::shouldWriteToNvmCacheExclusive( return true; } -template -bool CacheAllocator::tryEvictItem(MMContainer& mmContainer, - WriteHandle&& handle) { - auto& item = *handle; - - // we should flush this to nvmcache if it is not already present in nvmcache - // and the item is not expired. - const bool evictToNvmCache = shouldWriteToNvmCache(item); - - auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) - : typename NvmCacheT::PutToken{}; - // record the in-flight eviciton. If not, we move on to next item to avoid - // stalling eviction. - if (evictToNvmCache && !token.isValid()) { - if (item.isChainedItem()) - stats_.evictFailConcurrentFill.inc(); - else - stats_.evictFailConcurrentFill.inc(); - handle.reset(); - return false; - } - - // If there are other accessors, we should abort. We should be the only - // handle owner. - auto evictHandle = accessContainer_->removeIf(item, &itemEvictionPredicate); - - /* We got new handle so it's safe to get rid of the old one. */ - handle.reset(); - - if (!evictHandle) { - if (item.isChainedItem()) - stats_.evictFailParentAC.inc(); - else - stats_.evictFailAC.inc(); - return false; - } - - auto removed = mmContainer.remove(item); - XDCHECK(removed); - XDCHECK_EQ(reinterpret_cast(evictHandle.get()), - reinterpret_cast(&item)); - XDCHECK(!evictHandle->isInMMContainer()); - XDCHECK(!evictHandle->isAccessible()); - - // If the item is now marked as moving, that means its corresponding slab is - // being released right now. So, we look for the next item that is eligible - // for eviction. It is safe to destroy the handle here since the moving bit - // is set. 
- if (evictHandle->isMoving()) { - if (item.isChainedItem()) - stats_.evictFailParentMove.inc(); - else - stats_.evictFailMove.inc(); - return false; - } - - // Ensure that there are no accessors after removing from the access - // container. - XDCHECK(evictHandle->getRefCount() == 1); - - if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) { - XDCHECK(token.isValid()); - nvmCache_->put(evictHandle, std::move(token)); - } - - /* Release the handle, item will be reused by the called. */ - evictHandle.release(); - - return true; -} - template typename CacheAllocator::RemoveRes CacheAllocator::remove(typename Item::Key key) { @@ -2640,7 +2593,7 @@ void CacheAllocator::evictForSlabRelease( auto owningHandle = item.isChainedItem() ? evictChainedItemForSlabRelease(item.asChainedItem()) - : evictNormalItemForSlabRelease(item); + : evictNormalItem(item); // we managed to evict the corresponding owner of the item and have the // last handle for the owner. @@ -2697,7 +2650,8 @@ void CacheAllocator::evictForSlabRelease( template typename CacheAllocator::WriteHandle -CacheAllocator::evictNormalItemForSlabRelease(Item& item) { +CacheAllocator::evictNormalItem(Item& item, + bool skipIfTokenInvalid) { XDCHECK(item.isMoving()); if (item.isOnlyMoving()) { @@ -2710,6 +2664,11 @@ CacheAllocator::evictNormalItemForSlabRelease(Item& item) { auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) : typename NvmCacheT::PutToken{}; + if (skipIfTokenInvalid && evictToNvmCache && !token.isValid()) { + stats_.evictFailConcurrentFill.inc(); + return WriteHandle{}; + } + // We remove the item from both access and mm containers. It doesn't matter // if someone else calls remove on the item at this moment, the item cannot // be freed as long as we have the moving bit set. 
diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 1756c891e4..57100bee5f 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1661,15 +1661,6 @@ class CacheAllocator : public CacheBase { using EvictionIterator = typename MMContainer::Iterator; - // Try to evict an item. - // - // @param mmContainer the container to look for evictions. - // @param handle handle to item to evict. eviction will fail if - // this is not an owning handle. - // - // @return whether eviction succeeded - bool tryEvictItem(MMContainer& mmContainer, WriteHandle&& handle); - // Deserializer CacheAllocatorMetadata and verify the version // // @param deserializer Deserializer object @@ -1787,7 +1778,7 @@ class CacheAllocator : public CacheBase { // // @return last handle for corresponding to item on success. empty handle on // failure. caller can retry if needed. - WriteHandle evictNormalItemForSlabRelease(Item& item); + WriteHandle evictNormalItem(Item& item, bool skipIfTokenInvalid = false); // Helper function to evict a child item for slab release // As a side effect, the parent item is also evicted @@ -1920,10 +1911,6 @@ class CacheAllocator : public CacheBase { return item.getRefCount() == 0; } - static bool itemEvictionPredicate(const Item& item) { - return item.getRefCount() == 1 && !item.isMoving(); - } - static bool itemExpiryPredicate(const Item& item) { return item.getRefCount() == 1 && item.isExpired(); } diff --git a/cachelib/allocator/CacheItem-inl.h b/cachelib/allocator/CacheItem-inl.h index b293971b88..fbdbee4fad 100644 --- a/cachelib/allocator/CacheItem-inl.h +++ b/cachelib/allocator/CacheItem-inl.h @@ -226,8 +226,8 @@ bool CacheItem::markMoving() noexcept { } template -void CacheItem::unmarkMoving() noexcept { - ref_.unmarkMoving(); +RefcountWithFlags::Value CacheItem::unmarkMoving() noexcept { + return ref_.unmarkMoving(); } template diff --git a/cachelib/allocator/CacheItem.h 
b/cachelib/allocator/CacheItem.h index 0d699d8f97..7c5e6d357a 100644 --- a/cachelib/allocator/CacheItem.h +++ b/cachelib/allocator/CacheItem.h @@ -360,7 +360,7 @@ class CACHELIB_PACKED_ATTR CacheItem { * Unmarking moving does not depend on `isInMMContainer` */ bool markMoving() noexcept; - void unmarkMoving() noexcept; + RefcountWithFlags::Value unmarkMoving() noexcept; bool isMoving() const noexcept; bool isOnlyMoving() const noexcept; diff --git a/cachelib/allocator/ChainedHashTable-inl.h b/cachelib/allocator/ChainedHashTable-inl.h index 54bff4fb0c..2655a4b12d 100644 --- a/cachelib/allocator/ChainedHashTable-inl.h +++ b/cachelib/allocator/ChainedHashTable-inl.h @@ -450,21 +450,6 @@ typename T::Handle ChainedHashTable::Container::removeIf( } } -template T::*HookPtr, - typename LockT> -typename T::Handle ChainedHashTable::Container::find( - T& node) const { - const auto bucket = ht_.getBucket(node.getKey()); - auto l = locks_.lockShared(bucket); - - if (!node.isAccessible()) { - return {}; - } - - return handleMaker_(&node); -} - template T::*HookPtr, typename LockT> diff --git a/cachelib/allocator/ChainedHashTable.h b/cachelib/allocator/ChainedHashTable.h index 19c2f9f9f6..8333b43116 100644 --- a/cachelib/allocator/ChainedHashTable.h +++ b/cachelib/allocator/ChainedHashTable.h @@ -496,17 +496,6 @@ class ChainedHashTable { // creating this item handle. Handle find(Key key) const; - // returns a handle to specified node. - // - // @param node requested node - // - // @return handle to the node if find was sucessfull. returns a - // null handle if the node was not in the container. - // - // @throw std::overflow_error is the maximum item refcount is execeeded by - // creating this item handle. 
- Handle find(T& node) const; - // for saving the state of the hash table // // precondition: serialization must happen without any reader or writer diff --git a/cachelib/allocator/Refcount.h b/cachelib/allocator/Refcount.h index 631e1695f9..8328019f27 100644 --- a/cachelib/allocator/Refcount.h +++ b/cachelib/allocator/Refcount.h @@ -247,10 +247,10 @@ class FOLLY_PACK_ATTR RefcountWithFlags { /** * The following four functions are used to track whether or not * an item is currently in the process of being moved. This happens during a - * slab rebalance or resize operation. + * slab rebalance or resize operation or during eviction. * - * An item can only be marked moving when `isInMMContainer` returns true. - * This operation is atomic. + * An item can only be marked moving when `isInMMContainer` returns true and + * the item is not yet marked as moving. This operation is atomic. * * User can also query if an item "isOnlyMoving". This returns true only * if the refcount is 0 and only the moving bit is set. @@ -267,7 +267,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags { Value curValue = __atomic_load_n(refPtr, __ATOMIC_RELAXED); while (true) { const bool flagSet = curValue & conditionBitMask; - if (!flagSet) { + const bool alreadyMoving = curValue & bitMask; + if (!flagSet || alreadyMoving) { return false; } @@ -286,9 +287,9 @@ class FOLLY_PACK_ATTR RefcountWithFlags { } } } - void unmarkMoving() noexcept { + Value unmarkMoving() noexcept { Value bitMask = ~getAdminRef(); - __atomic_and_fetch(&refCount_, bitMask, __ATOMIC_ACQ_REL); + return __atomic_and_fetch(&refCount_, bitMask, __ATOMIC_ACQ_REL) & kRefMask; } bool isMoving() const noexcept { return getRaw() & getAdminRef(); } bool isOnlyMoving() const noexcept { From 00f5d549afdb13296050a65fd8fe4de9ecaa79e4 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Mon, 13 Jun 2022 10:20:36 -0400 Subject: [PATCH 5/9] Execute findEviction critical section under MMContainer combined_lock. 
--- cachelib/allocator/CacheAllocator-inl.h | 48 ++++++++++++++++--------- cachelib/allocator/MM2Q-inl.h | 21 +++++------ cachelib/allocator/MM2Q.h | 5 +++ cachelib/allocator/MMLru-inl.h | 8 +++++ cachelib/allocator/MMLru.h | 5 +++ cachelib/allocator/MMTinyLFU-inl.h | 7 ++++ cachelib/allocator/MMTinyLFU.h | 5 +++ 7 files changed, 70 insertions(+), 29 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index b86310daac..c02502aa3b 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1220,27 +1220,43 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // Keep searching for a candidate until we were able to evict it // or until the search limit has been exhausted unsigned int searchTries = 0; - auto itr = mmContainer.getEvictionIterator(); while ((config_.evictionSearchTries == 0 || - config_.evictionSearchTries > searchTries) && - itr) { + config_.evictionSearchTries > searchTries)) { ++searchTries; (*stats_.evictionAttempts)[pid][cid].inc(); - Item* toRecycle = itr.get(); - - Item* candidate = - toRecycle->isChainedItem() - ? &toRecycle->asChainedItem().getParentItem(compressor_) - : toRecycle; - - // make sure no other thead is evicting the item - if (candidate->getRefCount() != 0 || !candidate->markMoving()) { - ++itr; + Item* toRecycle = nullptr; + Item* candidate = nullptr; + + mmContainer.withEvictionIterator( + [this, &candidate, &toRecycle, &searchTries](auto&& itr) { + while ((config_.evictionSearchTries == 0 || + config_.evictionSearchTries > searchTries) && + itr) { + ++searchTries; + + auto* toRecycle_ = itr.get(); + auto* candidate_ = + toRecycle_->isChainedItem() + ? 
&toRecycle_->asChainedItem().getParentItem(compressor_) + : toRecycle_; + + // make sure no other thead is evicting the item + if (candidate_->getRefCount() == 0 && candidate_->markMoving()) { + toRecycle = toRecycle_; + candidate = candidate_; + return; + } + + ++itr; + } + }); + + if (!toRecycle) continue; - } - itr.destroy(); + XDCHECK(toRecycle); + XDCHECK(candidate); // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent @@ -1301,8 +1317,6 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { return toRecycle; } } - - itr.resetToBegin(); } return nullptr; } diff --git a/cachelib/allocator/MM2Q-inl.h b/cachelib/allocator/MM2Q-inl.h index 1d2482d45b..493e050c09 100644 --- a/cachelib/allocator/MM2Q-inl.h +++ b/cachelib/allocator/MM2Q-inl.h @@ -250,22 +250,19 @@ MM2Q::Container::getEvictionIterator() const noexcept { // arbitrary amount of time outside a lambda-friendly piece of code (eg. they // can return the iterator from functions, pass it to functions, etc) // - // it would be theoretically possible to refactor this interface into - // something like the following to allow combining - // - // mm2q.withEvictionIterator([&](auto iterator) { - // // user code - // }); - // - // at the time of writing it is unclear if the gains from combining are - // reasonable justification for the codemod required to achieve combinability - // as we don't expect this critical section to be the hotspot in user code. - // This is however subject to change at some time in the future as and when - // this assertion becomes false. 
+ // to get advantage of combining, use withEvictionIterator LockHolder l(*lruMutex_); return Iterator{std::move(l), lru_.rbegin()}; } +template T::*HookPtr> +template +void MM2Q::Container::withEvictionIterator(F&& fun) { + lruMutex_->lock_combine([this, &fun]() { + fun(Iterator{LockHolder{}, lru_.rbegin()}); + }); +} + template T::*HookPtr> void MM2Q::Container::removeLocked(T& node, bool doRebalance) noexcept { diff --git a/cachelib/allocator/MM2Q.h b/cachelib/allocator/MM2Q.h index 886dffdddd..31073965ce 100644 --- a/cachelib/allocator/MM2Q.h +++ b/cachelib/allocator/MM2Q.h @@ -447,6 +447,11 @@ class MM2Q { // container and only one such iterator can exist at a time Iterator getEvictionIterator() const noexcept; + // Execute provided function under container lock. Function gets + // iterator passed as parameter. + template + void withEvictionIterator(F&& f); + // get the current config as a copy Config getConfig() const; diff --git a/cachelib/allocator/MMLru-inl.h b/cachelib/allocator/MMLru-inl.h index 5a66161ae0..53ed1b5f59 100644 --- a/cachelib/allocator/MMLru-inl.h +++ b/cachelib/allocator/MMLru-inl.h @@ -218,6 +218,14 @@ MMLru::Container::getEvictionIterator() const noexcept { return Iterator{std::move(l), lru_.rbegin()}; } +template T::*HookPtr> +template +void MMLru::Container::withEvictionIterator(F&& fun) { + lruMutex_->lock_combine([this, &fun]() { + fun(Iterator{LockHolder{}, lru_.rbegin()}); + }); +} + template T::*HookPtr> void MMLru::Container::ensureNotInsertionPoint(T& node) noexcept { // If we are removing the insertion point node, grow tail before we remove diff --git a/cachelib/allocator/MMLru.h b/cachelib/allocator/MMLru.h index f89438dd3d..0ba27db3a4 100644 --- a/cachelib/allocator/MMLru.h +++ b/cachelib/allocator/MMLru.h @@ -332,6 +332,11 @@ class MMLru { // container and only one such iterator can exist at a time Iterator getEvictionIterator() const noexcept; + // Execute provided function under container lock. 
Function gets + iterator passed as parameter. + template + void withEvictionIterator(F&& f); + // get copy of current config Config getConfig() const; diff --git a/cachelib/allocator/MMTinyLFU-inl.h b/cachelib/allocator/MMTinyLFU-inl.h index de06902482..d9e7da8a7d 100644 --- a/cachelib/allocator/MMTinyLFU-inl.h +++ b/cachelib/allocator/MMTinyLFU-inl.h @@ -220,6 +220,13 @@ MMTinyLFU::Container::getEvictionIterator() const noexcept { return Iterator{std::move(l), *this}; } +template T::*HookPtr> +template +void MMTinyLFU::Container::withEvictionIterator(F&& fun) { + LockHolder l(lruMutex_); + fun(Iterator{LockHolder{}, *this}); +} + template T::*HookPtr> void MMTinyLFU::Container::removeLocked(T& node) noexcept { if (isTiny(node)) { diff --git a/cachelib/allocator/MMTinyLFU.h b/cachelib/allocator/MMTinyLFU.h index 14d5ae6906..40886d53af 100644 --- a/cachelib/allocator/MMTinyLFU.h +++ b/cachelib/allocator/MMTinyLFU.h @@ -491,6 +491,11 @@ class MMTinyLFU { // container and only one such iterator can exist at a time Iterator getEvictionIterator() const noexcept; + // Execute provided function under container lock. Function gets + // iterator passed as parameter. + template + void withEvictionIterator(F&& f); + // for saving the state of the lru // // precondition: serialization must happen without any reader or writer From dee650840436ac3039f0e2a47e5f89dc85985212 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Mon, 20 Jun 2022 09:09:26 -0400 Subject: [PATCH 6/9] Remove skipIfTokenInvalid. Checking token validity should not be needed right after creating it.
--- cachelib/allocator/CacheAllocator-inl.h | 11 ++--------- cachelib/allocator/CacheAllocator.h | 2 +- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index c02502aa3b..d8fd9dfbe1 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1261,8 +1261,7 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. - auto toReleaseHandle = - evictNormalItem(*candidate, true /* skipIfTokenInvalid */); + auto toReleaseHandle = evictNormalItem(*candidate); auto ref = candidate->unmarkMoving(); if (toReleaseHandle || ref == 0u) { @@ -2664,8 +2663,7 @@ void CacheAllocator::evictForSlabRelease( template typename CacheAllocator::WriteHandle -CacheAllocator::evictNormalItem(Item& item, - bool skipIfTokenInvalid) { +CacheAllocator::evictNormalItem(Item& item) { XDCHECK(item.isMoving()); if (item.isOnlyMoving()) { @@ -2678,11 +2676,6 @@ CacheAllocator::evictNormalItem(Item& item, auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) : typename NvmCacheT::PutToken{}; - if (skipIfTokenInvalid && evictToNvmCache && !token.isValid()) { - stats_.evictFailConcurrentFill.inc(); - return WriteHandle{}; - } - // We remove the item from both access and mm containers. It doesn't matter // if someone else calls remove on the item at this moment, the item cannot // be freed as long as we have the moving bit set. diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 57100bee5f..e1452cd8a8 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1778,7 +1778,7 @@ class CacheAllocator : public CacheBase { // // @return last handle for corresponding to item on success. 
empty handle on // failure. caller can retry if needed. - WriteHandle evictNormalItem(Item& item, bool skipIfTokenInvalid = false); + WriteHandle evictNormalItem(Item& item); // Helper function to evict a child item for slab release // As a side effect, the parent item is also evicted From 944ebe59e663230b62f2b64a8944365ce4807072 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Mon, 20 Jun 2022 09:23:39 -0400 Subject: [PATCH 7/9] Unconditionally destroy toReleaseHandle in findEviction, and rely only on refcount being zero when releasing items back to allocator. --- cachelib/allocator/CacheAllocator-inl.h | 56 +++++++++---------------- 1 file changed, 19 insertions(+), 37 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index d8fd9dfbe1..c98c6ec659 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1261,10 +1261,17 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. - auto toReleaseHandle = evictNormalItem(*candidate); - auto ref = candidate->unmarkMoving(); + { + auto toReleaseHandle = evictNormalItem(*candidate); + // destroy toReleseHandle. The item won't be release to allocator + // since we marked it as moving. + } + const auto ref = candidate->unmarkMoving(); - if (toReleaseHandle || ref == 0u) { + if (ref == 0u) { + // recycle the item. it's safe to do so, even if toReleaseHandle was + // NULL. If `ref` == 0 then it means that we are the last holder of + // that item.
if (candidate->hasChainedItem()) { (*stats_.chainedItemEvictions)[pid][cid].inc(); } else { @@ -1272,49 +1279,24 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { } if (auto eventTracker = getEventTracker()) { - eventTracker->record( - AllocatorApiEvent::DRAM_EVICT, toReleaseHandle->getKey(), - AllocatorApiResult::EVICTED, toReleaseHandle->getSize(), - toReleaseHandle->getConfiguredTTL().count()); + eventTracker->record(AllocatorApiEvent::DRAM_EVICT, candidate->getKey(), + AllocatorApiResult::EVICTED, candidate->getSize(), + candidate->getConfiguredTTL().count()); } - } else { - if (candidate->hasChainedItem()) { - stats_.evictFailParentAC.inc(); - } else { - stats_.evictFailAC.inc(); - } - } - - if (toReleaseHandle) { - XDCHECK(toReleaseHandle.get() == candidate); - XDCHECK(toRecycle == candidate || toRecycle->isChainedItem()); - XDCHECK_EQ(1u, toReleaseHandle->getRefCount()); - - // We manually release the item here because we don't want to - // invoke the Item Handle's destructor which will be decrementing - // an already zero refcount, which will throw exception - auto& itemToRelease = *toReleaseHandle.release(); - - // Decrementing the refcount because we want to recycle the item - ref = decRef(itemToRelease); - XDCHECK_EQ(0u, ref); // check if by releasing the item we intend to, we actually // recycle the candidate. - if (ReleaseRes::kRecycled == - releaseBackToAllocator(itemToRelease, RemoveContext::kEviction, - /* isNascent */ false, toRecycle)) { - return toRecycle; - } - } else if (ref == 0u) { - // it's safe to recycle the item here as there are no more - // references and the item could not been marked as moving - // by other thread since it's detached from MMContainer. 
if (ReleaseRes::kRecycled == releaseBackToAllocator(*candidate, RemoveContext::kEviction, /* isNascent */ false, toRecycle)) { return toRecycle; } + } else { + if (candidate->hasChainedItem()) { + stats_.evictFailParentAC.inc(); + } else { + stats_.evictFailAC.inc(); + } } } return nullptr; From 2144b96349b600a9730ec96ce6485b026a682fbb Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Fri, 24 Jun 2022 13:18:53 +0000 Subject: [PATCH 8/9] Rename moving bit to exclusive --- cachelib/allocator/CacheAllocator-inl.h | 77 +++++++++++------------ cachelib/allocator/CacheAllocator.h | 14 ++--- cachelib/allocator/CacheItem-inl.h | 33 +++++----- cachelib/allocator/CacheItem.h | 32 +++++----- cachelib/allocator/MM2Q-inl.h | 8 --- cachelib/allocator/MM2Q.h | 5 -- cachelib/allocator/MMLru-inl.h | 8 --- cachelib/allocator/MMLru.h | 5 -- cachelib/allocator/MMTinyLFU-inl.h | 7 --- cachelib/allocator/MMTinyLFU.h | 5 -- cachelib/allocator/Refcount.h | 49 +++++++-------- cachelib/allocator/tests/ItemTest.cpp | 4 +- cachelib/allocator/tests/RefCountTest.cpp | 28 ++++----- 13 files changed, 114 insertions(+), 161 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index c98c6ec659..fbb65e0429 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -813,20 +813,20 @@ CacheAllocator::releaseBackToAllocator(Item& it, removeFromMMContainer(*head); - // If this chained item is marked as moving, we will not free it. - // We must capture the moving state before we do the decRef when + // If this chained item is marked as exclusive, we will not free it. + // We must capture the exclusive state before we do the decRef when // we know the item must still be valid - const bool wasMoving = head->isMoving(); + const bool wasExclusive = head->isExclusive(); // Decref and check if we were the last reference. 
Now if the item - // was marked moving, after decRef, it will be free to be released + // was marked exclusive, after decRef, it will be free to be released // by slab release thread const auto childRef = head->decRef(); - // If the item is already moving and we already decremented the + // If the item is already exclusive and we already decremented the // refcount, we don't need to free this item. We'll let the slab // release thread take care of that - if (!wasMoving) { + if (!wasExclusive) { if (childRef != 0) { throw std::runtime_error(folly::sformat( "chained item refcount is not zero. We cannot proceed! " @@ -834,7 +834,7 @@ CacheAllocator::releaseBackToAllocator(Item& it, childRef, head->toString())); } - // Item is not moving and refcount is 0, we can proceed to + // Item is not exclusive and refcount is 0, we can proceed to // free it or recylce the memory if (head == toRecycle) { XDCHECK(ReleaseRes::kReleased != res); @@ -1110,7 +1110,8 @@ bool CacheAllocator::moveRegularItem(Item& oldItem, // this item through an API such as remove(itemHandle). In this case, // it is unsafe to replace the old item with a new one, so we should // also abort. 
- if (!accessContainer_->replaceIf(oldItem, *newItemHdl, itemMovingPredicate)) { + if (!accessContainer_->replaceIf(oldItem, *newItemHdl, + itemExclusivePredicate)) { return false; } @@ -1159,7 +1160,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem, // This item has been unlinked from its parent and we're the only // owner of it, so we're done here - if (!oldItem.isInMMContainer() || oldItem.isOnlyMoving()) { + if (!oldItem.isInMMContainer() || oldItem.isOnlyExclusive()) { return false; } @@ -1190,7 +1191,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem, // In case someone else had removed this chained item from its parent by now // So we check again to see if the it has been unlinked from its parent - if (!oldItem.isInMMContainer() || oldItem.isOnlyMoving()) { + if (!oldItem.isInMMContainer() || oldItem.isOnlyExclusive()) { return false; } @@ -1206,7 +1207,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem, // parent's chain and the MMContainer. auto oldItemHandle = replaceChainedItemLocked(oldItem, std::move(newItemHdl), *parentHandle); - XDCHECK(oldItemHandle->isMoving()); + XDCHECK(oldItemHandle->isExclusive()); XDCHECK(!oldItemHandle->isInMMContainer()); return true; @@ -1242,7 +1243,7 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { : toRecycle_; // make sure no other thead is evicting the item - if (candidate_->getRefCount() == 0 && candidate_->markMoving()) { + if (candidate_->getRefCount() == 0 && candidate_->markExclusive()) { toRecycle = toRecycle_; candidate = candidate_; return; @@ -1264,9 +1265,9 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { { auto toReleaseHandle = evictNormalItem(*candidate); // destroy toReleseHandle. The item won't be release to allocator - // since we marked it as moving. + // since we marked it as exclusive. } - const auto ref = candidate->unmarkMoving(); + const auto ref = candidate->unmarkExclusive(); if (ref == 0u) { // recycle the item. 
it's safe to do so, even if toReleaseHandle was @@ -2348,7 +2349,7 @@ void CacheAllocator::releaseSlabImpl( // Need to mark an item for release before proceeding // If we can't mark as moving, it means the item is already freed const bool isAlreadyFreed = - !markMovingForSlabRelease(releaseContext, alloc, throttler); + !markExclusiveForSlabRelease(releaseContext, alloc, throttler); if (isAlreadyFreed) { continue; } @@ -2393,8 +2394,8 @@ bool CacheAllocator::moveForSlabRelease( stats_.numMoveAttempts.inc(); // Nothing to move and the key is likely also bogus for chained items. - if (oldItem.isOnlyMoving()) { - oldItem.unmarkMoving(); + if (oldItem.isOnlyExclusive()) { + oldItem.unmarkExclusive(); const auto res = releaseBackToAllocator(oldItem, RemoveContext::kNormal, false); XDCHECK(res == ReleaseRes::kReleased); @@ -2433,7 +2434,7 @@ bool CacheAllocator::moveForSlabRelease( // that's identical to this one to replace it. Here we just need to wait // until all users have dropped the item handles before we can proceed. startTime = util::getCurrentTimeSec(); - while (!oldItem.isOnlyMoving()) { + while (!oldItem.isOnlyExclusive()) { throttleWith(throttler, [&] { XLOGF(WARN, "Spent {} seconds, slab release still waiting for refcount to " @@ -2488,7 +2489,7 @@ CacheAllocator::allocateNewItemForOldItem(const Item& oldItem) { } // Set up the destination for the move. Since oldChainedItem would have - // the moving bit set, it won't be picked for eviction. + // the exclusive bit set, it won't be picked for eviction. auto newItemHdl = allocateChainedItemInternal(parentHandle, oldChainedItem.getSize()); if (!newItemHdl) { @@ -2540,7 +2541,7 @@ bool CacheAllocator::tryMovingForSlabRelease( // item is still valid. 
const std::string parentKey = oldItem.asChainedItem().getParentItem(compressor_).getKey().str(); - if (oldItem.isOnlyMoving()) { + if (oldItem.isOnlyExclusive()) { // If chained item no longer has a refcount, its parent is already // being released, so we abort this try to moving. return false; @@ -2571,11 +2572,11 @@ void CacheAllocator::evictForSlabRelease( while (true) { stats_.numEvictionAttempts.inc(); - // if the item is already in a state where only the moving bit is set, - // nothing needs to be done. We simply need to unmark moving bit and free + // if the item is already in a state where only the exclusive bit is set, + // nothing needs to be done. We simply need to unmark exclusive bit and free // the item. - if (item.isOnlyMoving()) { - item.unmarkMoving(); + if (item.isOnlyExclusive()) { + item.unmarkExclusive(); const auto res = releaseBackToAllocator(item, RemoveContext::kNormal, false); XDCHECK(ReleaseRes::kReleased == res); @@ -2605,10 +2606,8 @@ void CacheAllocator::evictForSlabRelease( stats_.numEvictionSuccesses.inc(); - // we have the last handle. no longer need to hold on to the moving bit - item.unmarkMoving(); - - XDCHECK(owningHandle->isExclusive()); + // we have the last handle. 
no longer need to hold on to the exclusive bit + item.unmarkExclusive(); // manually decrement the refcount to call releaseBackToAllocator const auto ref = decRef(*owningHandle); @@ -2620,7 +2619,7 @@ void CacheAllocator::evictForSlabRelease( } if (shutDownInProgress_) { - item.unmarkMoving(); + item.unmarkExclusive(); allocator_->abortSlabRelease(ctx); throw exception::SlabReleaseAborted( folly::sformat("Slab Release aborted while trying to evict" @@ -2646,9 +2645,9 @@ void CacheAllocator::evictForSlabRelease( template typename CacheAllocator::WriteHandle CacheAllocator::evictNormalItem(Item& item) { - XDCHECK(item.isMoving()); + XDCHECK(item.isExclusive()); - if (item.isOnlyMoving()) { + if (item.isOnlyExclusive()) { return WriteHandle{}; } @@ -2660,7 +2659,7 @@ CacheAllocator::evictNormalItem(Item& item) { // We remove the item from both access and mm containers. It doesn't matter // if someone else calls remove on the item at this moment, the item cannot - // be freed as long as we have the moving bit set. + // be freed as long as we have the exclusive bit set. auto handle = accessContainer_->removeIf(item, std::move(predicate)); if (!handle) { @@ -2684,7 +2683,7 @@ CacheAllocator::evictNormalItem(Item& item) { template typename CacheAllocator::WriteHandle CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) { - XDCHECK(child.isMoving()); + XDCHECK(child.isExclusive()); // We have the child marked as moving, but dont know anything about the // state of the parent. Unlike the case of regular eviction where we are @@ -2706,7 +2705,7 @@ CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) { // check if the child is still in mmContainer and the expected parent is // valid under the chained item lock. 
if (expectedParent.getKey() != parentKey || !child.isInMMContainer() || - child.isOnlyMoving() || + child.isOnlyExclusive() || &expectedParent != &child.getParentItem(compressor_) || !expectedParent.isAccessible() || !expectedParent.hasChainedItem()) { return {}; @@ -2761,14 +2760,14 @@ CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) { // In case someone else had removed this chained item from its parent by now // So we check again to see if it has been unlinked from its parent - if (!child.isInMMContainer() || child.isOnlyMoving()) { + if (!child.isInMMContainer() || child.isOnlyExclusive()) { return {}; } // check after removing from the MMContainer that the parent is still not // being marked as moving. If parent is moving, it will release the child // item and we will wait for that. - if (parentHandle->isMoving()) { + if (parentHandle->isExclusive()) { return {}; } @@ -2801,7 +2800,7 @@ bool CacheAllocator::removeIfExpired(const ReadHandle& handle) { } template -bool CacheAllocator::markMovingForSlabRelease( +bool CacheAllocator::markExclusiveForSlabRelease( const SlabReleaseContext& ctx, void* alloc, util::Throttler& throttler) { // MemoryAllocator::processAllocForRelease will execute the callback // if the item is not already free. 
So there are three outcomes here: @@ -2820,7 +2819,7 @@ bool CacheAllocator::markMovingForSlabRelease( // Since this callback is executed, the item is not yet freed itemFreed = false; Item* item = static_cast(memory); - if (item->markMoving()) { + if (item->markExclusive()) { markedMoving = true; } }; @@ -2842,7 +2841,7 @@ bool CacheAllocator::markMovingForSlabRelease( itemFreed = true; if (shutDownInProgress_) { - XDCHECK(!static_cast(alloc)->isMoving()); + XDCHECK(!static_cast(alloc)->isExclusive()); allocator_->abortSlabRelease(ctx); throw exception::SlabReleaseAborted( folly::sformat("Slab Release aborted while still trying to mark" diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index e1452cd8a8..3d7f0b1b76 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1507,7 +1507,7 @@ class CacheAllocator : public CacheBase { FOLLY_ALWAYS_INLINE WriteHandle findFastImpl(Key key, AccessMode mode); // Moves a regular item to a different slab. This should only be used during - // slab release after the item's moving bit has been set. The user supplied + // slab release after the item's exclusive bit has been set. The user supplied // callback is responsible for copying the contents and fixing the semantics // of chained item. // @@ -1530,7 +1530,7 @@ class CacheAllocator : public CacheBase { folly::IOBuf convertToIOBufT(Handle& handle); // Moves a chained item to a different slab. This should only be used during - // slab release after the item's moving bit has been set. The user supplied + // slab release after the item's exclusive bit has been set. The user supplied // callback is responsible for copying the contents and fixing the semantics // of chained item. 
// @@ -1736,9 +1736,9 @@ class CacheAllocator : public CacheBase { // @return true when successfully marked as moving, // fasle when this item has already been freed - bool markMovingForSlabRelease(const SlabReleaseContext& ctx, - void* alloc, - util::Throttler& throttler); + bool markExclusiveForSlabRelease(const SlabReleaseContext& ctx, + void* alloc, + util::Throttler& throttler); // "Move" (by copying) the content in this item to another memory // location by invoking the move callback. @@ -1907,7 +1907,7 @@ class CacheAllocator : public CacheBase { std::optional saveNvmCache(); void saveRamCache(); - static bool itemMovingPredicate(const Item& item) { + static bool itemExclusivePredicate(const Item& item) { return item.getRefCount() == 0; } @@ -1916,7 +1916,7 @@ class CacheAllocator : public CacheBase { } static bool parentEvictForSlabReleasePredicate(const Item& item) { - return item.getRefCount() == 1 && !item.isMoving(); + return item.getRefCount() == 1 && !item.isExclusive(); } std::unique_ptr createDeserializer(); diff --git a/cachelib/allocator/CacheItem-inl.h b/cachelib/allocator/CacheItem-inl.h index fbdbee4fad..8bdf9a96fe 100644 --- a/cachelib/allocator/CacheItem-inl.h +++ b/cachelib/allocator/CacheItem-inl.h @@ -148,12 +148,13 @@ std::string CacheItem::toString() const { return folly::sformat( "item: " "memory={}:raw-ref={}:size={}:key={}:hex-key={}:" - "isInMMContainer={}:isAccessible={}:isMoving={}:references={}:ctime={}:" + "isInMMContainer={}:isAccessible={}:isExclusive={}:references={}:ctime=" + "{}:" "expTime={}:updateTime={}:isNvmClean={}:isNvmEvicted={}:hasChainedItem=" "{}", this, getRefCountAndFlagsRaw(), getSize(), folly::humanify(getKey().str()), folly::hexlify(getKey()), - isInMMContainer(), isAccessible(), isMoving(), getRefCount(), + isInMMContainer(), isAccessible(), isExclusive(), getRefCount(), getCreationTime(), getExpiryTime(), getLastAccessTime(), isNvmClean(), isNvmEvicted(), hasChainedItem()); } @@ -185,11 +186,6 @@ bool 
CacheItem::isDrained() const noexcept { return ref_.isDrained(); } -template -bool CacheItem::isExclusive() const noexcept { - return ref_.isExclusive(); -} - template void CacheItem::markAccessible() noexcept { ref_.markAccessible(); @@ -221,23 +217,23 @@ bool CacheItem::isInMMContainer() const noexcept { } template -bool CacheItem::markMoving() noexcept { - return ref_.markMoving(); +bool CacheItem::markExclusive() noexcept { + return ref_.markExclusive(); } template -RefcountWithFlags::Value CacheItem::unmarkMoving() noexcept { - return ref_.unmarkMoving(); +RefcountWithFlags::Value CacheItem::unmarkExclusive() noexcept { + return ref_.unmarkExclusive(); } template -bool CacheItem::isMoving() const noexcept { - return ref_.isMoving(); +bool CacheItem::isExclusive() const noexcept { + return ref_.isExclusive(); } template -bool CacheItem::isOnlyMoving() const noexcept { - return ref_.isOnlyMoving(); +bool CacheItem::isOnlyExclusive() const noexcept { + return ref_.isOnlyExclusive(); } template @@ -339,7 +335,7 @@ bool CacheItem::updateExpiryTime(uint32_t expiryTimeSecs) noexcept { // check for moving to make sure we are not updating the expiry time while at // the same time re-allocating the item with the old state of the expiry time // in moveRegularItem(). 
See D6852328 - if (isMoving() || !isInMMContainer() || isChainedItem()) { + if (isExclusive() || !isInMMContainer() || isChainedItem()) { return false; } // attempt to atomically update the value of expiryTime @@ -455,10 +451,11 @@ std::string CacheChainedItem::toString() const { return folly::sformat( "chained item: " "memory={}:raw-ref={}:size={}:parent-compressed-ptr={}:" - "isInMMContainer={}:isAccessible={}:isMoving={}:references={}:ctime={}:" + "isInMMContainer={}:isAccessible={}:isExclusive={}:references={}:ctime={}" + ":" "expTime={}:updateTime={}", this, Item::getRefCountAndFlagsRaw(), Item::getSize(), cPtr.getRaw(), - Item::isInMMContainer(), Item::isAccessible(), Item::isMoving(), + Item::isInMMContainer(), Item::isAccessible(), Item::isExclusive(), Item::getRefCount(), Item::getCreationTime(), Item::getExpiryTime(), Item::getLastAccessTime()); } diff --git a/cachelib/allocator/CacheItem.h b/cachelib/allocator/CacheItem.h index 7c5e6d357a..350d6c1a59 100644 --- a/cachelib/allocator/CacheItem.h +++ b/cachelib/allocator/CacheItem.h @@ -246,23 +246,23 @@ class CACHELIB_PACKED_ATTR CacheItem { * * This API will only succeed when an item is a regular item, and user * has already inserted it into the cache (via @insert or @insertOrReplace). - * In addition, the item cannot be in a "moving" state. + * In addition, the item cannot be in a "exclusive" state. * * @param expiryTime the expiryTime value to update to * * @return boolean indicating whether expiry time was successfully updated - * false when item is not linked in cache, or in moving state, or a + * false when item is not linked in cache, or in exclusive state, or a * chained item */ bool updateExpiryTime(uint32_t expiryTimeSecs) noexcept; // Same as @updateExpiryTime, but sets expiry time to @ttl seconds from now. // It has the same restrictions as @updateExpiryTime. An item must be a - // regular item and is part of the cache and NOT in the moving state. 
+ // regular item and is part of the cache and NOT in the exclusive state. // // @param ttl TTL (from now) // @return boolean indicating whether expiry time was successfully updated - // false when item is not linked in cache, or in moving state, or a + // false when item is not linked in cache, or in exclusive state, or a // chained item bool extendTTL(std::chrono::seconds ttl) noexcept; @@ -320,13 +320,9 @@ class CACHELIB_PACKED_ATTR CacheItem { // Whether or not an item is completely drained of all references including // the internal ones. This means there is no access refcount bits and zero // admin bits set. I.e. refcount is 0 and the item is not linked, accessible, - // nor moving + // nor exclusive bool isDrained() const noexcept; - // Whether or not we hold the last exclusive access to this item - // Refcount is 1 and the item is not linked, accessible, nor moving - bool isExclusive() const noexcept; - /** * The following three functions correspond to the state of the allocation * in the memory management container. This is protected by the @@ -349,20 +345,20 @@ class CACHELIB_PACKED_ATTR CacheItem { /** * The following two functions corresond to whether or not an item is * currently in the process of being moved. This happens during a slab - * rebalance or resize operation. + * rebalance, eviction or resize operation. * - * An item can only be marked moving when `isInMMContainer` returns true. + * An item can only be marked exclusive when `isInMMContainer` returns true. * This operation is atomic. * - * User can also query if an item "isOnlyMoving". This returns true only - * if the refcount is 0 and only the moving bit is set. + * User can also query if an item "isOnlyExclusive". This returns true only + * if the refcount is 0 and only the exclusive bit is set. 
* - * Unmarking moving does not depend on `isInMMContainer` + * Unmarking exclusive does not depend on `isInMMContainer` */ - bool markMoving() noexcept; - RefcountWithFlags::Value unmarkMoving() noexcept; - bool isMoving() const noexcept; - bool isOnlyMoving() const noexcept; + bool markExclusive() noexcept; + RefcountWithFlags::Value unmarkExclusive() noexcept; + bool isExclusive() const noexcept; + bool isOnlyExclusive() const noexcept; /** * Item cannot be marked both chained allocation and diff --git a/cachelib/allocator/MM2Q-inl.h b/cachelib/allocator/MM2Q-inl.h index 493e050c09..848a9a0de0 100644 --- a/cachelib/allocator/MM2Q-inl.h +++ b/cachelib/allocator/MM2Q-inl.h @@ -255,14 +255,6 @@ MM2Q::Container::getEvictionIterator() const noexcept { return Iterator{std::move(l), lru_.rbegin()}; } -template T::*HookPtr> -template -void MM2Q::Container::withEvictionIterator(F&& fun) { - lruMutex_->lock_combine([this, &fun]() { - fun(Iterator{LockHolder{}, lru_.rbegin()}); - }); -} - template T::*HookPtr> void MM2Q::Container::removeLocked(T& node, bool doRebalance) noexcept { diff --git a/cachelib/allocator/MM2Q.h b/cachelib/allocator/MM2Q.h index 31073965ce..886dffdddd 100644 --- a/cachelib/allocator/MM2Q.h +++ b/cachelib/allocator/MM2Q.h @@ -447,11 +447,6 @@ class MM2Q { // container and only one such iterator can exist at a time Iterator getEvictionIterator() const noexcept; - // Execute provided function under container lock. Function gets - // iterator passed as parameter. 
- template - void withEvictionIterator(F&& f); - // get the current config as a copy Config getConfig() const; diff --git a/cachelib/allocator/MMLru-inl.h b/cachelib/allocator/MMLru-inl.h index 53ed1b5f59..5a66161ae0 100644 --- a/cachelib/allocator/MMLru-inl.h +++ b/cachelib/allocator/MMLru-inl.h @@ -218,14 +218,6 @@ MMLru::Container::getEvictionIterator() const noexcept { return Iterator{std::move(l), lru_.rbegin()}; } -template T::*HookPtr> -template -void MMLru::Container::withEvictionIterator(F&& fun) { - lruMutex_->lock_combine([this, &fun]() { - fun(Iterator{LockHolder{}, lru_.rbegin()}); - }); -} - template T::*HookPtr> void MMLru::Container::ensureNotInsertionPoint(T& node) noexcept { // If we are removing the insertion point node, grow tail before we remove diff --git a/cachelib/allocator/MMLru.h b/cachelib/allocator/MMLru.h index 0ba27db3a4..f89438dd3d 100644 --- a/cachelib/allocator/MMLru.h +++ b/cachelib/allocator/MMLru.h @@ -332,11 +332,6 @@ class MMLru { // container and only one such iterator can exist at a time Iterator getEvictionIterator() const noexcept; - // Execute provided function under container lock. Function gets - // iterator passed as parameter. 
- template - void withEvictionIterator(F&& f); - // get copy of current config Config getConfig() const; diff --git a/cachelib/allocator/MMTinyLFU-inl.h b/cachelib/allocator/MMTinyLFU-inl.h index d9e7da8a7d..de06902482 100644 --- a/cachelib/allocator/MMTinyLFU-inl.h +++ b/cachelib/allocator/MMTinyLFU-inl.h @@ -220,13 +220,6 @@ MMTinyLFU::Container::getEvictionIterator() const noexcept { return Iterator{std::move(l), *this}; } -template T::*HookPtr> -template -void MMTinyLFU::Container::withEvictionIterator(F&& fun) { - LockHolder l(lruMutex_); - fun(Iterator{LockHolder{}, *this}); -} - template T::*HookPtr> void MMTinyLFU::Container::removeLocked(T& node) noexcept { if (isTiny(node)) { diff --git a/cachelib/allocator/MMTinyLFU.h b/cachelib/allocator/MMTinyLFU.h index 40886d53af..14d5ae6906 100644 --- a/cachelib/allocator/MMTinyLFU.h +++ b/cachelib/allocator/MMTinyLFU.h @@ -491,11 +491,6 @@ class MMTinyLFU { // container and only one such iterator can exist at a time Iterator getEvictionIterator() const noexcept; - // Execute provided function under container lock. Function gets - // iterator passed as parameter. - template - void withEvictionIterator(F&& f); - // for saving the state of the lru // // precondition: serialization must happen without any reader or writer diff --git a/cachelib/allocator/Refcount.h b/cachelib/allocator/Refcount.h index 8328019f27..47e249b382 100644 --- a/cachelib/allocator/Refcount.h +++ b/cachelib/allocator/Refcount.h @@ -82,7 +82,7 @@ class FOLLY_PACK_ATTR RefcountWithFlags { // this flag indicates the allocation is being moved elsewhere // (can be triggered by a resize or reblanace operation) - kMoving, + kExclusive, }; /** @@ -119,7 +119,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags { // Unused. 
This is just to indciate the maximum number of flags kFlagMax, }; - static_assert(static_cast(kMMFlag0) > static_cast(kMoving), + static_assert(static_cast(kMMFlag0) > + static_cast(kExclusive), "Flags and control bits cannot overlap in bit range."); static_assert(kFlagMax <= NumBits::value, "Too many flags."); @@ -249,16 +250,16 @@ class FOLLY_PACK_ATTR RefcountWithFlags { * an item is currently in the process of being moved. This happens during a * slab rebalance or resize operation or during eviction. * - * An item can only be marked moving when `isInMMContainer` returns true and - * the item is not yet marked as moving. This operation is atomic. + * An item can only be marked exclusive when `isInMMContainer` returns true + * and the item is not yet marked as exclusive. This operation is atomic. * - * User can also query if an item "isOnlyMoving". This returns true only - * if the refcount is 0 and only the moving bit is set. + * User can also query if an item "isOnlyExclusive". This returns true only + * if the refcount is 0 and only the exclusive bit is set. 
* - * Unmarking moving does not depend on `isInMMContainer` + * Unmarking exclusive does not depend on `isInMMContainer` */ - bool markMoving() noexcept { - Value bitMask = getAdminRef(); + bool markExclusive() noexcept { + Value bitMask = getAdminRef(); Value conditionBitMask = getAdminRef(); Value* const refPtr = &refCount_; @@ -267,8 +268,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags { Value curValue = __atomic_load_n(refPtr, __ATOMIC_RELAXED); while (true) { const bool flagSet = curValue & conditionBitMask; - const bool alreadyMoving = curValue & bitMask; - if (!flagSet || alreadyMoving) { + const bool alreadyExclusive = curValue & bitMask; + if (!flagSet || alreadyExclusive) { return false; } @@ -287,21 +288,23 @@ class FOLLY_PACK_ATTR RefcountWithFlags { } } } - Value unmarkMoving() noexcept { - Value bitMask = ~getAdminRef(); + Value unmarkExclusive() noexcept { + Value bitMask = ~getAdminRef(); return __atomic_and_fetch(&refCount_, bitMask, __ATOMIC_ACQ_REL) & kRefMask; } - bool isMoving() const noexcept { return getRaw() & getAdminRef(); } - bool isOnlyMoving() const noexcept { - // An item is only moving when its refcount is zero and only the moving bit - // among all the control bits is set. This indicates an item is already on - // its way out of cache and does not need to be moved. + bool isExclusive() const noexcept { + return getRaw() & getAdminRef(); + } + bool isOnlyExclusive() const noexcept { + // An item is only exclusive when its refcount is zero and only the + // exclusive bit among all the control bits is set. This indicates an item + // is already on its way out of cache and does not need to be moved. 
auto ref = getRefWithAccessAndAdmin(); - bool anyOtherBitSet = ref & ~getAdminRef(); + bool anyOtherBitSet = ref & ~getAdminRef(); if (anyOtherBitSet) { return false; } - return ref & getAdminRef(); + return ref & getAdminRef(); } /** @@ -331,13 +334,9 @@ class FOLLY_PACK_ATTR RefcountWithFlags { bool isNvmEvicted() const noexcept { return isFlagSet(); } // Whether or not an item is completely drained of access - // Refcount is 0 and the item is not linked, accessible, nor moving + // Refcount is 0 and the item is not linked, accessible, nor exclusive bool isDrained() const noexcept { return getRefWithAccessAndAdmin() == 0; } - // Whether or not we hold the last exclusive access to this item - // Refcount is 1 and the item is not linked, accessible, nor moving - bool isExclusive() const noexcept { return getRefWithAccessAndAdmin() == 1; } - /** * Functions to set, unset and check flag presence. This is exposed * for external components to manipulate flags. E.g. MMContainer will diff --git a/cachelib/allocator/tests/ItemTest.cpp b/cachelib/allocator/tests/ItemTest.cpp index 45bc529a00..edece3fb58 100644 --- a/cachelib/allocator/tests/ItemTest.cpp +++ b/cachelib/allocator/tests/ItemTest.cpp @@ -83,10 +83,10 @@ TEST(ItemTest, ExpiryTime) { EXPECT_EQ(tenMins, item->getConfiguredTTL()); // Test that writes fail while the item is moving - item->markMoving(); + item->markExclusive(); result = item->updateExpiryTime(0); EXPECT_FALSE(result); - item->unmarkMoving(); + item->unmarkExclusive(); // Test that writes fail while the item is not in an MMContainer item->unmarkInMMContainer(); diff --git a/cachelib/allocator/tests/RefCountTest.cpp b/cachelib/allocator/tests/RefCountTest.cpp index 7d789a85d8..153f4a94a4 100644 --- a/cachelib/allocator/tests/RefCountTest.cpp +++ b/cachelib/allocator/tests/RefCountTest.cpp @@ -81,7 +81,7 @@ void RefCountTest::testBasic() { ASSERT_EQ(0, ref.getRaw()); ASSERT_FALSE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - 
ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -89,7 +89,7 @@ void RefCountTest::testBasic() { ref.markInMMContainer(); ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -111,7 +111,7 @@ void RefCountTest::testBasic() { // Bumping up access ref shouldn't affect admin ref and flags ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(RefcountWithFlags::kAccessRefMask, ref.getAccessRef()); ASSERT_TRUE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -128,7 +128,7 @@ void RefCountTest::testBasic() { // Bumping down access ref shouldn't affect admin ref and flags ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_TRUE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -136,7 +136,7 @@ void RefCountTest::testBasic() { ref.template unSetFlag(); ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -145,28 +145,28 @@ void RefCountTest::testBasic() { ASSERT_EQ(0, ref.getRaw()); ASSERT_FALSE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); // conditionally set flags - ASSERT_FALSE((ref.markMoving())); + 
ASSERT_FALSE((ref.markExclusive())); ref.markInMMContainer(); - ASSERT_TRUE((ref.markMoving())); - ASSERT_FALSE((ref.isOnlyMoving())); + ASSERT_TRUE((ref.markExclusive())); + ASSERT_FALSE((ref.isOnlyExclusive())); ref.unmarkInMMContainer(); ref.template setFlag(); - // Have no other admin refcount but with a flag still means "isOnlyMoving" - ASSERT_TRUE((ref.isOnlyMoving())); + // Have no other admin refcount but with a flag still means "isOnlyExclusive" + ASSERT_TRUE((ref.isOnlyExclusive())); - // Set some flags and verify that "isOnlyMoving" does not care about flags + // Set some flags and verify that "isOnlyExclusive" does not care about flags ref.markIsChainedItem(); ASSERT_TRUE(ref.isChainedItem()); - ASSERT_TRUE((ref.isOnlyMoving())); + ASSERT_TRUE((ref.isOnlyExclusive())); ref.unmarkIsChainedItem(); ASSERT_FALSE(ref.isChainedItem()); - ASSERT_TRUE((ref.isOnlyMoving())); + ASSERT_TRUE((ref.isOnlyExclusive())); } } // namespace From 5b6d18c9a5c909c22378a890428605157e5bd1bf Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Mon, 3 Oct 2022 14:22:17 +0000 Subject: [PATCH 9/9] Synchronize with SlabRelease before eviction attempts. Mark the item as exclusive before trying to remove from AC or MMContainer. 
--- cachelib/allocator/CacheAllocator-inl.h | 198 +++++++++++++++++++----- cachelib/allocator/CacheAllocator.h | 21 ++- cachelib/allocator/CacheItem.h | 4 +- cachelib/allocator/MM2Q-inl.h | 13 +- cachelib/allocator/Refcount.h | 7 +- 5 files changed, 195 insertions(+), 48 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index fbb65e0429..58cf99593b 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1221,55 +1221,46 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // Keep searching for a candidate until we were able to evict it // or until the search limit has been exhausted unsigned int searchTries = 0; + auto itr = mmContainer.getEvictionIterator(); while ((config_.evictionSearchTries == 0 || - config_.evictionSearchTries > searchTries)) { + config_.evictionSearchTries > searchTries) && + itr) { ++searchTries; (*stats_.evictionAttempts)[pid][cid].inc(); - Item* toRecycle = nullptr; - Item* candidate = nullptr; - - mmContainer.withEvictionIterator( - [this, &candidate, &toRecycle, &searchTries](auto&& itr) { - while ((config_.evictionSearchTries == 0 || - config_.evictionSearchTries > searchTries) && - itr) { - ++searchTries; - - auto* toRecycle_ = itr.get(); - auto* candidate_ = - toRecycle_->isChainedItem() - ? &toRecycle_->asChainedItem().getParentItem(compressor_) - : toRecycle_; - - // make sure no other thead is evicting the item - if (candidate_->getRefCount() == 0 && candidate_->markExclusive()) { - toRecycle = toRecycle_; - candidate = candidate_; - return; - } - - ++itr; - } - }); - - if (!toRecycle) - continue; + Item* toRecycle = itr.get(); + + Item* candidate = + toRecycle->isChainedItem() + ? 
&toRecycle->asChainedItem().getParentItem(compressor_) + : toRecycle; - XDCHECK(toRecycle); - XDCHECK(candidate); + // make sure no other thead is evicting the item + if (candidate->getRefCount() != 0 || !candidate->markExclusive()) { + ++itr; + continue; + } // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. + bool evictionSuccessful = false; { - auto toReleaseHandle = evictNormalItem(*candidate); - // destroy toReleseHandle. The item won't be release to allocator + auto toReleaseHandle = + itr->isChainedItem() + ? advanceIteratorAndTryEvictChainedItem(itr) + : advanceIteratorAndTryEvictRegularItem(mmContainer, itr); + evictionSuccessful = toReleaseHandle != nullptr; + // destroy toReleseHandle. The item won't be released to allocator // since we marked it as exclusive. } - const auto ref = candidate->unmarkExclusive(); + const auto ref = candidate->unmarkExclusive(); if (ref == 0u) { + // Invalidate iterator since later on we may use this mmContainer + // again, which cannot be done unless we drop this iterator + itr.destroy(); + // recycle the item. it's safe to do so, even if toReleaseHandle was // NULL. If `ref` == 0 then it means that we are the last holder of // that item. @@ -1293,11 +1284,13 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { return toRecycle; } } else { - if (candidate->hasChainedItem()) { - stats_.evictFailParentAC.inc(); - } else { - stats_.evictFailAC.inc(); - } + XDCHECK(!evictionSuccessful); + } + + // If we destroyed the itr to possibly evict and failed, we restart + // from the beginning again + if (!itr) { + itr.resetToBegin(); } } return nullptr; @@ -2589,7 +2582,7 @@ void CacheAllocator::evictForSlabRelease( auto owningHandle = item.isChainedItem() ? 
evictChainedItemForSlabRelease(item.asChainedItem()) - : evictNormalItem(item); + : evictNormalItemForSlabRelease(item); // we managed to evict the corresponding owner of the item and have the // last handle for the owner. @@ -2644,7 +2637,127 @@ void CacheAllocator::evictForSlabRelease( template typename CacheAllocator::WriteHandle -CacheAllocator::evictNormalItem(Item& item) { +CacheAllocator::advanceIteratorAndTryEvictRegularItem( + MMContainer& mmContainer, EvictionIterator& itr) { + // we should flush this to nvmcache if it is not already present in nvmcache + // and the item is not expired. + Item& item = *itr; + const bool evictToNvmCache = shouldWriteToNvmCache(item); + + auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) + : typename NvmCacheT::PutToken{}; + + // record the in-flight eviciton. If not, we move on to next item to avoid + // stalling eviction. + if (evictToNvmCache && !token.isValid()) { + ++itr; + stats_.evictFailConcurrentFill.inc(); + return WriteHandle{}; + } + + // If there are other accessors, we should abort. Acquire a handle here since + // if we remove the item from both access containers and mm containers + // below, we will need a handle to ensure proper cleanup in case we end up + // not evicting this item + auto evictHandle = accessContainer_->removeIf(item, &itemExclusivePredicate); + if (!evictHandle) { + ++itr; + stats_.evictFailAC.inc(); + return evictHandle; + } + + mmContainer.remove(itr); + XDCHECK_EQ(reinterpret_cast(evictHandle.get()), + reinterpret_cast(&item)); + XDCHECK(!evictHandle->isInMMContainer()); + XDCHECK(!evictHandle->isAccessible()); + + // Invalidate iterator since later on if we are not evicting this + // item, we may need to rely on the handle we created above to ensure + // proper cleanup if the item's raw refcount has dropped to 0. 
+ // And since this item may be a parent item that has some child items + // in this very same mmContainer, we need to make sure we drop this + // exclusive iterator so we can gain access to it when we're cleaning + // up the child items + itr.destroy(); + + // Ensure that there are no accessors after removing from the access + // container + XDCHECK(evictHandle->getRefCount() == 1); + + if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) { + XDCHECK(token.isValid()); + nvmCache_->put(evictHandle, std::move(token)); + } + return evictHandle; +} + +template +typename CacheAllocator::WriteHandle +CacheAllocator::advanceIteratorAndTryEvictChainedItem( + EvictionIterator& itr) { + XDCHECK(itr->isChainedItem()); + + ChainedItem* candidate = &itr->asChainedItem(); + ++itr; + + // The parent could change at any point through transferChain. However, if + // that happens, we would realize that the releaseBackToAllocator return + // kNotRecycled and we would try another chained item, leading to transient + // failure. + auto& parent = candidate->getParentItem(compressor_); + + const bool evictToNvmCache = shouldWriteToNvmCache(parent); + + auto token = evictToNvmCache ? nvmCache_->createPutToken(parent.getKey()) + : typename NvmCacheT::PutToken{}; + + // if token is invalid, return. iterator is already advanced. + if (evictToNvmCache && !token.isValid()) { + stats_.evictFailConcurrentFill.inc(); + return WriteHandle{}; + } + + // check if the parent exists in the hashtable and refcount is drained. + auto parentHandle = + accessContainer_->removeIf(parent, &itemExclusivePredicate); + if (!parentHandle) { + stats_.evictFailParentAC.inc(); + return parentHandle; + } + + // Invalidate iterator since later on we may use the mmContainer + // associated with this iterator which cannot be done unless we + // drop this iterator + // + // This must be done once we know the parent is not nullptr. 
+ // Since we can very well be the last holder of this parent item, + // which may have a chained item that is linked in this MM container. + itr.destroy(); + + // Ensure we have the correct parent and we're the only user of the + // parent, then free it from access container. Otherwise, we abort + XDCHECK_EQ(reinterpret_cast(&parent), + reinterpret_cast(parentHandle.get())); + XDCHECK_EQ(1u, parent.getRefCount()); + + removeFromMMContainer(*parentHandle); + + XDCHECK(!parent.isInMMContainer()); + XDCHECK(!parent.isAccessible()); + + if (evictToNvmCache && shouldWriteToNvmCacheExclusive(*parentHandle)) { + XDCHECK(token.isValid()); + XDCHECK(parentHandle->hasChainedItem()); + nvmCache_->put(parentHandle, std::move(token)); + } + + return parentHandle; +} + +template +typename CacheAllocator::WriteHandle +CacheAllocator::evictNormalItemForSlabRelease(Item& item) { XDCHECK(item.isExclusive()); if (item.isOnlyExclusive()) { @@ -2841,7 +2954,6 @@ bool CacheAllocator::markExclusiveForSlabRelease( itemFreed = true; if (shutDownInProgress_) { - XDCHECK(!static_cast(alloc)->isExclusive()); allocator_->abortSlabRelease(ctx); throw exception::SlabReleaseAborted( folly::sformat("Slab Release aborted while still trying to mark" diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 3d7f0b1b76..63065c7807 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1661,6 +1661,25 @@ class CacheAllocator : public CacheBase { using EvictionIterator = typename MMContainer::Iterator; + // Advance the current iterator and try to evict a regular item + // + // @param mmContainer the container to look for evictions. + // @param itr iterator holding the item + // + // @return valid handle to regular item on success. This will be the last + // handle to the item. On failure an empty handle. 
+ WriteHandle advanceIteratorAndTryEvictRegularItem(MMContainer& mmContainer, + EvictionIterator& itr); + + // Advance the current iterator and try to evict a chained item + // Iterator may also be reset during the course of this function + // + // @param itr iterator holding the item + // + // @return valid handle to the parent item on success. This will be the last + // handle to the item + WriteHandle advanceIteratorAndTryEvictChainedItem(EvictionIterator& itr); + // Deserializer CacheAllocatorMetadata and verify the version // // @param deserializer Deserializer object @@ -1778,7 +1797,7 @@ class CacheAllocator : public CacheBase { // // @return last handle for corresponding to item on success. empty handle on // failure. caller can retry if needed. - WriteHandle evictNormalItem(Item& item); + WriteHandle evictNormalItemForSlabRelease(Item& item); // Helper function to evict a child item for slab release // As a side effect, the parent item is also evicted diff --git a/cachelib/allocator/CacheItem.h b/cachelib/allocator/CacheItem.h index 350d6c1a59..c17ebb90d5 100644 --- a/cachelib/allocator/CacheItem.h +++ b/cachelib/allocator/CacheItem.h @@ -353,7 +353,9 @@ class CACHELIB_PACKED_ATTR CacheItem { * User can also query if an item "isOnlyExclusive". This returns true only * if the refcount is 0 and only the exclusive bit is set. * - * Unmarking exclusive does not depend on `isInMMContainer` + * Unmarking exclusive does not depend on `isInMMContainer`. + * Unmarking exclusive will also return the refcount at the moment of + * unmarking. */ bool markExclusive() noexcept; RefcountWithFlags::Value unmarkExclusive() noexcept; diff --git a/cachelib/allocator/MM2Q-inl.h b/cachelib/allocator/MM2Q-inl.h index 848a9a0de0..1d2482d45b 100644 --- a/cachelib/allocator/MM2Q-inl.h +++ b/cachelib/allocator/MM2Q-inl.h @@ -250,7 +250,18 @@ MM2Q::Container::getEvictionIterator() const noexcept { // arbitrary amount of time outside a lambda-friendly piece of code (eg. 
they // can return the iterator from functions, pass it to functions, etc) // - // to get advantage of combining, use withEvictionIterator + // it would be theoretically possible to refactor this interface into + // something like the following to allow combining + // + // mm2q.withEvictionIterator([&](auto iterator) { + // // user code + // }); + // + // at the time of writing it is unclear if the gains from combining are + // reasonable justification for the codemod required to achieve combinability + // as we don't expect this critical section to be the hotspot in user code. + // This is however subject to change at some time in the future as and when + // this assertion becomes false. LockHolder l(*lruMutex_); return Iterator{std::move(l), lru_.rbegin()}; } diff --git a/cachelib/allocator/Refcount.h b/cachelib/allocator/Refcount.h index 47e249b382..3c37e2e098 100644 --- a/cachelib/allocator/Refcount.h +++ b/cachelib/allocator/Refcount.h @@ -256,7 +256,9 @@ class FOLLY_PACK_ATTR RefcountWithFlags { * User can also query if an item "isOnlyExclusive". This returns true only * if the refcount is 0 and only the exclusive bit is set. * - * Unmarking exclusive does not depend on `isInMMContainer` + * Unmarking exclusive does not depend on `isInMMContainer`. + * Unmarking exclusive will also return the refcount at the moment of + * unmarking. */ bool markExclusive() noexcept { Value bitMask = getAdminRef(); @@ -298,7 +300,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags { bool isOnlyExclusive() const noexcept { // An item is only exclusive when its refcount is zero and only the // exclusive bit among all the control bits is set. This indicates an item - // is already on its way out of cache and does not need to be moved. + // is exclusive to the current thread. No other thread is allowed to + // do anything with it. auto ref = getRefWithAccessAndAdmin(); bool anyOtherBitSet = ref & ~getAdminRef(); if (anyOtherBitSet) {