diff --git a/src/gpgmm/SlabMemoryAllocator.cpp b/src/gpgmm/SlabMemoryAllocator.cpp
index 1bc3d102b..95e639efd 100644
--- a/src/gpgmm/SlabMemoryAllocator.cpp
+++ b/src/gpgmm/SlabMemoryAllocator.cpp
@@ -116,19 +116,16 @@ namespace gpgmm {
         SlabCache* cache = GetOrCreateCache(slabSize);
         ASSERT(cache != nullptr);

-        auto* pHead = cache->FreeList.head();
-
-        Slab* pFreeSlab = nullptr;
-        // Check free-list since HEAD must always exist (linked-list is self-referential).
-        if (!cache->FreeList.empty()) {
-            pFreeSlab = pHead->value();
-        }
+        auto* pFreeHead = cache->FreeList.head();
+        Slab* pFreeSlab = pFreeHead->value();

         // Splice the full slab from the free-list to the full-list.
-        if (pFreeSlab != nullptr && pFreeSlab->IsFull()) {
-            pHead->RemoveFromList();
-            pHead->InsertBefore(cache->FullList.head());
+        if (!cache->FreeList.empty() && pFreeSlab->IsFull()) {
+            pFreeHead->RemoveFromList();
+            pFreeHead->InsertBefore(cache->FullList.head());
+            pFreeSlab = cache->FreeList.head()->value();
+            pFreeHead = nullptr;
         }

         // Push new free slab at free-list HEAD
@@ -140,6 +137,7 @@ namespace gpgmm {

         ASSERT(pFreeSlab != nullptr);
         ASSERT(!pFreeSlab->IsFull());
+        ASSERT(!cache->FreeList.empty());

         std::unique_ptr<MemoryAllocation> subAllocation;
         GPGMM_TRY_ASSIGN(
diff --git a/src/tests/unittests/SlabMemoryAllocatorTests.cpp b/src/tests/unittests/SlabMemoryAllocatorTests.cpp
index 3ac3821f4..bada71e8a 100644
--- a/src/tests/unittests/SlabMemoryAllocatorTests.cpp
+++ b/src/tests/unittests/SlabMemoryAllocatorTests.cpp
@@ -31,7 +31,7 @@ static constexpr uint64_t kDefaultSlabAlignment = 1u;
 static constexpr double kDefaultSlabFragmentationLimit = 0.125;
 static constexpr bool kDefaultPrefetchSlab = false;

-// Verify a single resource allocation in a single slab.
+// Verify allocation in a single slab.
 TEST(SlabMemoryAllocatorTests, SingleSlab) {
     std::unique_ptr<DummyMemoryAllocator> dummyMemoryAllocator =
         std::make_unique<DummyMemoryAllocator>();
@@ -143,15 +143,16 @@ TEST(SlabMemoryAllocatorTests, SingleSlab) {
     }
 }

-// Verify a single resource allocation in multiple slabs.
+// Verify allocation in multiple slabs.
 TEST(SlabMemoryAllocatorTests, MultipleSlabs) {
     std::unique_ptr<DummyMemoryAllocator> dummyMemoryAllocator =
         std::make_unique<DummyMemoryAllocator>();
-    constexpr uint64_t kBlockSize = 32;
-    constexpr uint64_t kMaxSlabSize = 512;

     // Fill up exactly N slabs (allocation = block = slab size).
     {
+        constexpr uint64_t kBlockSize = 32;
+        constexpr uint64_t kMaxSlabSize = 512;
+
         SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, /*slabSize*/ kBlockSize,
                                       kDefaultSlabAlignment, kDefaultSlabFragmentationLimit,
                                       kDefaultPrefetchSlab, dummyMemoryAllocator.get());
@@ -175,6 +176,9 @@ TEST(SlabMemoryAllocatorTests, MultipleSlabs) {

     // Fill up slabs through pre-allocation (allocation < block < slab size).
     {
+        constexpr uint64_t kBlockSize = 32;
+        constexpr uint64_t kMaxSlabSize = 512;
+
         SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize,
                                       kDefaultSlabAlignment, kDefaultSlabFragmentationLimit,
                                       kDefaultPrefetchSlab, dummyMemoryAllocator.get());
@@ -196,6 +200,89 @@ TEST(SlabMemoryAllocatorTests, MultipleSlabs) {
         EXPECT_EQ(allocator.GetSlabSizeForTesting(), 0u);
     }
+
+    // Verify slabs are reused in LIFO order.
+    {
+        constexpr uint64_t kBlockSize = 64;
+        constexpr uint64_t kMaxSlabSize = 512;
+        SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize,
+                                      kDefaultSlabAlignment, kDefaultSlabFragmentationLimit,
+                                      kDefaultPrefetchSlab, dummyMemoryAllocator.get());
+
+        // Both allocations A and B go in Slab A, which will become full.
+        std::unique_ptr<MemoryAllocation> allocationAInSlabA =
+            allocator.TryAllocateMemory(kBlockSize, 1, false, false, false);
+        ASSERT_NE(allocationAInSlabA, nullptr);
+
+        std::unique_ptr<MemoryAllocation> allocationBInSlabA =
+            allocator.TryAllocateMemory(kBlockSize, 1, false, false, false);
+        ASSERT_NE(allocationBInSlabA, nullptr);
+
+        EXPECT_EQ(allocationAInSlabA->GetMemory(), allocationBInSlabA->GetMemory());
+
+        // Allocations C and D go in Slab B, which will become full.
+        std::unique_ptr<MemoryAllocation> allocationCInSlabB =
+            allocator.TryAllocateMemory(kBlockSize, 1, false, false, false);
+        ASSERT_NE(allocationCInSlabB, nullptr);
+
+        EXPECT_NE(allocationBInSlabA->GetMemory(), allocationCInSlabB->GetMemory());
+
+        std::unique_ptr<MemoryAllocation> allocationDInSlabB =
+            allocator.TryAllocateMemory(kBlockSize, 1, false, false, false);
+        ASSERT_NE(allocationDInSlabB, nullptr);
+
+        EXPECT_EQ(allocationCInSlabB->GetMemory(), allocationDInSlabB->GetMemory());
+
+        // Allocations E and F go in Slab C, which will become full.
+        std::unique_ptr<MemoryAllocation> allocationEInSlabC =
+            allocator.TryAllocateMemory(kBlockSize, 1, false, false, false);
+        ASSERT_NE(allocationEInSlabC, nullptr);
+
+        EXPECT_NE(allocationDInSlabB->GetMemory(), allocationEInSlabC->GetMemory());
+
+        std::unique_ptr<MemoryAllocation> allocationFInSlabC =
+            allocator.TryAllocateMemory(kBlockSize, 1, false, false, false);
+        ASSERT_NE(allocationFInSlabC, nullptr);
+
+        EXPECT_EQ(allocationEInSlabC->GetMemory(), allocationFInSlabC->GetMemory());
+
+        // Free list: [].
+        // Full list: C -> B -> A.
+
+        allocator.DeallocateMemory(std::move(allocationAInSlabA));
+        allocator.DeallocateMemory(std::move(allocationCInSlabB));
+
+        // Free list: B -> A.
+        // Full list: C.
+
+        std::unique_ptr<MemoryAllocation> allocationGInSlabB =
+            allocator.TryAllocateMemory(kBlockSize, 1, false, false, false);
+        ASSERT_NE(allocationGInSlabB, nullptr);
+        EXPECT_EQ(allocationDInSlabB->GetMemory(), allocationGInSlabB->GetMemory());
+
+        // Free list: A.
+        // Full list: B -> C.
+
+        std::unique_ptr<MemoryAllocation> allocationHInSlabA =
+            allocator.TryAllocateMemory(kBlockSize, 1, false, false, false);
+        ASSERT_NE(allocationHInSlabA, nullptr);
+
+        EXPECT_EQ(allocationBInSlabA->GetMemory(), allocationHInSlabA->GetMemory());
+
+        // Free list: [].
+        // Full list: A -> B -> C.
+
+        allocator.DeallocateMemory(std::move(allocationHInSlabA));
+        allocator.DeallocateMemory(std::move(allocationBInSlabA));
+
+        allocator.DeallocateMemory(std::move(allocationGInSlabB));
+        allocator.DeallocateMemory(std::move(allocationDInSlabB));
+
+        allocator.DeallocateMemory(std::move(allocationEInSlabC));
+        allocator.DeallocateMemory(std::move(allocationFInSlabC));
+
+        EXPECT_EQ(allocator.GetInfo().UsedMemoryUsage, 0u);
+    }
 }

 // Verify a very large allocation does not overflow.
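
For reviewers unfamiliar with the slab cache internals, below is a minimal standalone sketch of the LIFO reuse policy the new test case verifies: allocations always come from the free-list HEAD, a slab that becomes full is spliced to the full-list, and a full slab whose block is freed is spliced back to the free-list HEAD, making it the next slab reused. This is not gpgmm code: the names SlabModel and SlabCacheModel are hypothetical, and std::list stands in for the intrusive linked-list used by the real allocator.

// A minimal sketch of the LIFO slab-reuse policy (hypothetical names; not gpgmm code).
#include <cassert>
#include <cstdint>
#include <list>

struct SlabModel {
    explicit SlabModel(uint64_t capacity) : blockCapacity(capacity) {}
    bool IsFull() const { return blockCount == blockCapacity; }

    uint64_t blockCount = 0;  // Blocks currently allocated from this slab.
    uint64_t blockCapacity;   // Total blocks the slab can hold.
};

class SlabCacheModel {
  public:
    explicit SlabCacheModel(uint64_t blocksPerSlab) : mBlocksPerSlab(blocksPerSlab) {}

    // Allocates one block from the slab at the free-list HEAD, creating a new
    // slab when no free slab exists.
    SlabModel* Allocate() {
        if (mFreeList.empty()) {
            mFreeList.emplace_front(mBlocksPerSlab);
        }
        SlabModel* slab = &mFreeList.front();
        slab->blockCount++;
        if (slab->IsFull()) {
            // Splice the now-full slab from the free-list to the full-list.
            // std::list::splice moves the node without invalidating pointers.
            mFullList.splice(mFullList.begin(), mFreeList, mFreeList.begin());
        }
        return slab;
    }

    // Frees one block. A previously-full slab is spliced back to the
    // free-list HEAD, so the most recently freed slab is reused first (LIFO).
    void Deallocate(SlabModel* slab) {
        assert(slab->blockCount > 0);
        const bool wasFull = slab->IsFull();
        slab->blockCount--;
        if (wasFull) {
            for (auto it = mFullList.begin(); it != mFullList.end(); ++it) {
                if (&*it == slab) {
                    mFreeList.splice(mFreeList.begin(), mFullList, it);
                    break;
                }
            }
        }
    }

  private:
    uint64_t mBlocksPerSlab;
    std::list<SlabModel> mFreeList;  // HEAD is the slab served next.
    std::list<SlabModel> mFullList;  // Slabs with no free blocks left.
};

int main() {
    SlabCacheModel cache(/*blocksPerSlab*/ 2);
    SlabModel* slabA = cache.Allocate();  // Slab A: 1/2 blocks used.
    cache.Allocate();                     // Slab A full, moves to full-list.
    SlabModel* slabB = cache.Allocate();  // Slab B: 1/2 blocks used.
    cache.Allocate();                     // Slab B full, moves to full-list.

    cache.Deallocate(slabA);  // Free list: A.
    cache.Deallocate(slabB);  // Free list: B -> A.

    // LIFO: slab B was freed last, so it is reused before slab A.
    assert(cache.Allocate() == slabB);
    return 0;
}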