diff --git a/src/gpgmm/BUILD.gn b/src/gpgmm/BUILD.gn index 100d50ce3..82e93b98a 100644 --- a/src/gpgmm/BUILD.gn +++ b/src/gpgmm/BUILD.gn @@ -133,7 +133,6 @@ source_set("gpgmm_sources") { "d3d12/CapsD3D12.h", "d3d12/DebugResourceAllocatorD3D12.cpp", "d3d12/DebugResourceAllocatorD3D12.h", - "d3d12/DefaultsD3D12.h", "d3d12/ErrorD3D12.cpp", "d3d12/ErrorD3D12.h", "d3d12/FenceD3D12.cpp", diff --git a/src/gpgmm/CMakeLists.txt b/src/gpgmm/CMakeLists.txt index 211ce891c..56d26984e 100644 --- a/src/gpgmm/CMakeLists.txt +++ b/src/gpgmm/CMakeLists.txt @@ -47,7 +47,6 @@ if (GPGMM_ENABLE_D3D12) "d3d12/DebugResourceAllocatorD3D12.h" "d3d12/CapsD3D12.cpp" "d3d12/CapsD3D12.h" - "d3d12/DefaultsD3D12.h" "d3d12/ErrorD3D12.cpp" "d3d12/ErrorD3D12.h" "d3d12/FenceD3D12.cpp" diff --git a/src/gpgmm/common/Defaults.h b/src/gpgmm/common/Defaults.h index 53014847f..442b30b54 100644 --- a/src/gpgmm/common/Defaults.h +++ b/src/gpgmm/common/Defaults.h @@ -20,6 +20,7 @@ namespace gpgmm { static constexpr const char* kDefaultTraceFile = "gpgmm_event_trace.json"; static constexpr double kDefaultFragmentationLimit = 0.125; // 1/8th or 12.5% + static constexpr double kDefaultMemoryGrowthFactor = 1.25; // 25% growth } // namespace gpgmm #endif // GPGMM_COMMON_DEFAULTS_H_ diff --git a/src/gpgmm/common/SlabMemoryAllocator.cpp b/src/gpgmm/common/SlabMemoryAllocator.cpp index 1e5e5440f..40282f6d8 100644 --- a/src/gpgmm/common/SlabMemoryAllocator.cpp +++ b/src/gpgmm/common/SlabMemoryAllocator.cpp @@ -31,21 +31,25 @@ namespace gpgmm { SlabMemoryAllocator::SlabMemoryAllocator(uint64_t blockSize, uint64_t maxSlabSize, - uint64_t slabSize, + uint64_t minSlabSize, uint64_t slabAlignment, double slabFragmentationLimit, bool prefetchSlab, + double slabGrowthFactor, MemoryAllocator* memoryAllocator) - : mBlockSize(blockSize), - mMaxSlabSize(maxSlabSize), - mSlabSize(slabSize), + : mLastUsedSlabSize(0), + mBlockSize(blockSize), mSlabAlignment(slabAlignment), + mMaxSlabSize(maxSlabSize), + 
mMinSlabSize(std::max(minSlabSize, mSlabAlignment)), mSlabFragmentationLimit(slabFragmentationLimit), mPrefetchSlab(prefetchSlab), + mSlabGrowthFactor(slabGrowthFactor), mMemoryAllocator(memoryAllocator) { ASSERT(IsPowerOfTwo(mMaxSlabSize)); ASSERT(mMemoryAllocator != nullptr); - ASSERT(mSlabSize <= mMaxSlabSize); + ASSERT(mSlabGrowthFactor >= 1); + ASSERT(mSlabAlignment > 0); } SlabMemoryAllocator::~SlabMemoryAllocator() { @@ -60,18 +64,16 @@ namespace gpgmm { } } - uint64_t SlabMemoryAllocator::ComputeSlabSize(uint64_t requestSize) const { + // Returns a new slab size of a power-of-two value. + uint64_t SlabMemoryAllocator::ComputeSlabSize(uint64_t requestSize, uint64_t slabSize) const { + ASSERT(requestSize <= mBlockSize); + // If the left over empty space is less than |mSlabFragmentationLimit| x slab size, // then the fragmentation is acceptable and we are done. For example, a 4MB slab and and a // 512KB block fits exactly 8 blocks with no wasted space. But a 3MB block has 1MB worth of // empty space leftover which exceeds |mSlabFragmentationLimit| x slab size or 500KB. - ASSERT(requestSize <= mBlockSize); - - // Slabs are grown in multiple of powers of two of the block size or |mSlabSize| - // if specified. 
- uint64_t slabSize = std::max(mSlabSize, mBlockSize); - const uint64_t wastedBytes = mBlockSize - requestSize; - while (wastedBytes > (mSlabFragmentationLimit * slabSize)) { + const uint64_t fragmentedBytes = mBlockSize - requestSize; + while (requestSize > slabSize || fragmentedBytes > (mSlabFragmentationLimit * slabSize)) { slabSize *= 2; } @@ -104,7 +106,7 @@ namespace gpgmm { return {}; } - const uint64_t slabSize = ComputeSlabSize(requestSize); + uint64_t slabSize = ComputeSlabSize(requestSize, std::max(mMinSlabSize, mLastUsedSlabSize)); if (slabSize > mMaxSlabSize) { InfoEvent("SlabMemoryAllocator.TryAllocateMemory", ALLOCATOR_MESSAGE_ID_SIZE_EXCEEDED) << "Slab size exceeded the max slab size (" + std::to_string(slabSize) + " vs " + @@ -130,6 +132,16 @@ namespace gpgmm { // Push new free slab at free-list HEAD if (cache->FreeList.empty() || pFreeSlab->IsFull()) { + // Get the next free slab. + if (mLastUsedSlabSize > 0) { + uint64_t newSlabSize = std::min( + ComputeSlabSize(requestSize, slabSize * mSlabGrowthFactor), mMaxSlabSize); + if (newSlabSize > slabSize) { + cache = GetOrCreateCache(newSlabSize); + slabSize = newSlabSize; + } + } + Slab* pNewFreeSlab = new Slab(slabSize / mBlockSize, mBlockSize); pNewFreeSlab->InsertBefore(cache->FreeList.head()); pFreeSlab = pNewFreeSlab; @@ -137,7 +149,6 @@ namespace gpgmm { ASSERT(pFreeSlab != nullptr); ASSERT(!pFreeSlab->IsFull()); - ASSERT(!cache->FreeList.empty()); std::unique_ptr subAllocation; GPGMM_TRY_ASSIGN( @@ -185,6 +196,10 @@ namespace gpgmm { mMemoryAllocator->TryAllocateMemoryAsync(slabSize, mSlabAlignment); } + // Remember the last allocated slab size so if a subsequent allocation requests a new slab, + // the new slab size will be slightly larger than the old slab size. + mLastUsedSlabSize = slabSize; + // Wrap the block in the containing slab. 
Since the slab's block could reside in another // allocated block, the slab's allocation offset must be made relative to slab's underlying // memory and not the slab. @@ -275,19 +290,22 @@ namespace gpgmm { SlabCacheAllocator::SlabCacheAllocator(uint64_t minBlockSize, uint64_t maxSlabSize, - uint64_t slabSize, + uint64_t minSlabSize, uint64_t slabAlignment, double slabFragmentationLimit, bool prefetchSlab, + double slabGrowthFactor, std::unique_ptr memoryAllocator) : MemoryAllocator(std::move(memoryAllocator)), mMinBlockSize(minBlockSize), mMaxSlabSize(maxSlabSize), - mSlabSize(slabSize), + mMinSlabSize(minSlabSize), mSlabAlignment(slabAlignment), mSlabFragmentationLimit(slabFragmentationLimit), - mPrefetchSlab(prefetchSlab) { + mPrefetchSlab(prefetchSlab), + mSlabGrowthFactor(slabGrowthFactor) { ASSERT(IsPowerOfTwo(mMaxSlabSize)); + ASSERT(mSlabGrowthFactor >= 1); } SlabCacheAllocator::~SlabCacheAllocator() { @@ -312,9 +330,9 @@ namespace gpgmm { auto entry = mSizeCache.GetOrCreate(SlabAllocatorCacheEntry(blockSize), cacheSize); SlabMemoryAllocator* slabAllocator = entry->GetValue().pSlabAllocator; if (slabAllocator == nullptr) { - slabAllocator = - new SlabMemoryAllocator(blockSize, mMaxSlabSize, mSlabSize, mSlabAlignment, - mSlabFragmentationLimit, mPrefetchSlab, GetFirstChild()); + slabAllocator = new SlabMemoryAllocator( + blockSize, mMaxSlabSize, mMinSlabSize, mSlabAlignment, mSlabFragmentationLimit, + mPrefetchSlab, mSlabGrowthFactor, GetFirstChild()); entry->GetValue().pSlabAllocator = slabAllocator; mSlabAllocators.Append(slabAllocator); } @@ -329,7 +347,7 @@ namespace gpgmm { // Hold onto the cached allocator until the last allocation gets deallocated. 
entry->Ref(); - TRACE_COUNTER1(TraceEventCategory::Default, "GPU slabs allocated (MB)", + TRACE_COUNTER1(TraceEventCategory::Default, "GPU slab memory used (MB)", (GetFirstChild()->GetInfo().UsedMemoryUsage) / 1e6); TRACE_COUNTER1(TraceEventCategory::Default, "GPU slab cache miss-rate (%)", @@ -374,12 +392,10 @@ namespace gpgmm { } // Memory allocator is common across slab allocators. - { - const MEMORY_ALLOCATOR_INFO& info = GetFirstChild()->GetInfo(); - result.FreeMemoryUsage = info.FreeMemoryUsage; - result.UsedMemoryCount = info.UsedMemoryCount; - result.UsedMemoryUsage = info.UsedMemoryUsage; - } + const MEMORY_ALLOCATOR_INFO& info = GetFirstChild()->GetInfo(); + result.FreeMemoryUsage = info.FreeMemoryUsage; + result.UsedMemoryCount = info.UsedMemoryCount; + result.UsedMemoryUsage = info.UsedMemoryUsage; return result; } diff --git a/src/gpgmm/common/SlabMemoryAllocator.h b/src/gpgmm/common/SlabMemoryAllocator.h index 8801f9626..2a3be7bd3 100644 --- a/src/gpgmm/common/SlabMemoryAllocator.h +++ b/src/gpgmm/common/SlabMemoryAllocator.h @@ -45,10 +45,11 @@ namespace gpgmm { public: SlabMemoryAllocator(uint64_t blockSize, uint64_t maxSlabSize, - uint64_t slabSize, + uint64_t minSlabSize, uint64_t slabAlignment, double slabFragmentationLimit, bool prefetchSlab, + double slabGrowthFactor, MemoryAllocator* memoryAllocator); ~SlabMemoryAllocator() override; @@ -65,7 +66,7 @@ namespace gpgmm { uint64_t GetSlabSizeForTesting() const; private: - uint64_t ComputeSlabSize(uint64_t requestSize) const; + uint64_t ComputeSlabSize(uint64_t requestSize, uint64_t slabSize) const; // Slab is a node in a doubly-linked list that contains a free-list of blocks // and a reference to underlying memory. 
@@ -112,12 +113,16 @@ namespace gpgmm { std::vector mCaches; + uint64_t mLastUsedSlabSize = 0; + const uint64_t mBlockSize; - const uint64_t mMaxSlabSize; - const uint64_t mSlabSize; const uint64_t mSlabAlignment; + const uint64_t mMaxSlabSize; + const uint64_t mMinSlabSize; // Optional size when non-zero. + const double mSlabFragmentationLimit; const bool mPrefetchSlab; + const double mSlabGrowthFactor; MemoryAllocator* mMemoryAllocator = nullptr; std::shared_ptr mNextSlabAllocationEvent; @@ -129,10 +134,11 @@ namespace gpgmm { public: SlabCacheAllocator(uint64_t minBlockSize, uint64_t maxSlabSize, - uint64_t slabSize, + uint64_t minSlabSize, uint64_t slabAlignment, double slabFragmentationLimit, bool prefetchSlab, + double slabGrowthFactor, std::unique_ptr memoryAllocator); ~SlabCacheAllocator() override; @@ -171,10 +177,12 @@ namespace gpgmm { const uint64_t mMinBlockSize; const uint64_t mMaxSlabSize; - const uint64_t mSlabSize; // Optional size when non-zero. + const uint64_t mMinSlabSize; const uint64_t mSlabAlignment; + const double mSlabFragmentationLimit; const bool mPrefetchSlab; + const double mSlabGrowthFactor; LinkedList mSlabAllocators; MemoryCache mSizeCache; diff --git a/src/gpgmm/d3d12/DefaultsD3D12.h b/src/gpgmm/d3d12/DefaultsD3D12.h deleted file mode 100644 index 7a8a7f5b6..000000000 --- a/src/gpgmm/d3d12/DefaultsD3D12.h +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2021 The GPGMM Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#ifndef GPGMM_D3D12_DEFAULTSD3D12_H_ -#define GPGMM_D3D12_DEFAULTSD3D12_H_ - -#include "gpgmm/common/Defaults.h" - -namespace gpgmm { namespace d3d12 { - - static constexpr uint64_t kDefaultPreferredResourceHeapSize = 4ll * 1024ll * 1024ll; // 4MB -}} // namespace gpgmm::d3d12 - -#endif // GPGMM_D3D12_DEFAULTSD3D12_H_ diff --git a/src/gpgmm/d3d12/ResidencyManagerD3D12.cpp b/src/gpgmm/d3d12/ResidencyManagerD3D12.cpp index 182f0d5da..69cba938c 100644 --- a/src/gpgmm/d3d12/ResidencyManagerD3D12.cpp +++ b/src/gpgmm/d3d12/ResidencyManagerD3D12.cpp @@ -16,7 +16,6 @@ #include "gpgmm/d3d12/ResidencyManagerD3D12.h" #include "gpgmm/common/Debug.h" -#include "gpgmm/d3d12/DefaultsD3D12.h" #include "gpgmm/d3d12/ErrorD3D12.h" #include "gpgmm/d3d12/FenceD3D12.h" #include "gpgmm/d3d12/HeapD3D12.h" diff --git a/src/gpgmm/d3d12/ResourceAllocatorD3D12.cpp b/src/gpgmm/d3d12/ResourceAllocatorD3D12.cpp index 1137d972f..08604f9b6 100644 --- a/src/gpgmm/d3d12/ResourceAllocatorD3D12.cpp +++ b/src/gpgmm/d3d12/ResourceAllocatorD3D12.cpp @@ -18,6 +18,7 @@ #include "gpgmm/common/BuddyMemoryAllocator.h" #include "gpgmm/common/ConditionalMemoryAllocator.h" #include "gpgmm/common/Debug.h" +#include "gpgmm/common/Defaults.h" #include "gpgmm/common/MemorySize.h" #include "gpgmm/common/SegmentedMemoryAllocator.h" #include "gpgmm/common/SlabMemoryAllocator.h" @@ -26,7 +27,6 @@ #include "gpgmm/d3d12/BufferAllocatorD3D12.h" #include "gpgmm/d3d12/CapsD3D12.h" #include "gpgmm/d3d12/DebugResourceAllocatorD3D12.h" -#include "gpgmm/d3d12/DefaultsD3D12.h" #include "gpgmm/d3d12/ErrorD3D12.h" #include "gpgmm/d3d12/HeapD3D12.h" #include "gpgmm/d3d12/JSONSerializerD3D12.h" @@ -341,9 +341,9 @@ namespace gpgmm { namespace d3d12 { } ALLOCATOR_DESC newDescriptor = descriptor; - newDescriptor.PreferredResourceHeapSize = (descriptor.PreferredResourceHeapSize > 0) - ? 
descriptor.PreferredResourceHeapSize : kDefaultPreferredResourceHeapSize; + newDescriptor.MemoryGrowthFactor = (descriptor.MemoryGrowthFactor >= 1.0) + ? descriptor.MemoryGrowthFactor + : kDefaultMemoryGrowthFactor; newDescriptor.MaxResourceHeapSize = (descriptor.MaxResourceHeapSize > 0) @@ -455,21 +455,17 @@ namespace gpgmm { namespace d3d12 { pooledOrNonPooledAllocator = std::move(resourceHeapAllocator); } - std::unique_ptr buddyAllocator = - std::make_unique( - PrevPowerOfTwo(mMaxResourceHeapSize), descriptor.PreferredResourceHeapSize, - heapAlignment, std::move(pooledOrNonPooledAllocator)); - - // TODO: Figure out the optimal slab size to heap ratio. + // TODO: Re-enable the buddy allocator? mResourceAllocatorOfType[resourceHeapTypeIndex] = std::make_unique< SlabCacheAllocator>( /*minBlockSize*/ D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT, /*maxSlabSize*/ PrevPowerOfTwo(mMaxResourceHeapSize), - /*slabSize*/ descriptor.PreferredResourceHeapSize, + /*minSlabSize*/ std::max(heapAlignment, descriptor.PreferredResourceHeapSize), /*slabAlignment*/ heapAlignment, /*slabFragmentationLimit*/ descriptor.MemoryFragmentationLimit, /*enablePrefetch*/ !(descriptor.Flags & ALLOCATOR_FLAG_DISABLE_MEMORY_PREFETCH), - std::move(buddyAllocator)); + /*slabGrowthFactor*/ descriptor.MemoryGrowthFactor, + std::move(pooledOrNonPooledAllocator)); } { @@ -519,7 +515,8 @@ namespace gpgmm { namespace d3d12 { /*slabSize*/ D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT, /*slabAlignment*/ D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT, /*slabFragmentationLimit*/ 0, - /*enablePrefetch*/ false, std::move(pooledOrNonPooledAllocator)); + /*enablePrefetch*/ false, + /*slabGrowthFactor*/ 1, std::move(pooledOrNonPooledAllocator)); } // Cache resource sizes commonly requested. 
diff --git a/src/gpgmm/d3d12/ResourceAllocatorD3D12.h b/src/gpgmm/d3d12/ResourceAllocatorD3D12.h index 15c116244..df0a75074 100644 --- a/src/gpgmm/d3d12/ResourceAllocatorD3D12.h +++ b/src/gpgmm/d3d12/ResourceAllocatorD3D12.h @@ -256,7 +256,7 @@ namespace gpgmm { namespace d3d12 { A larger resource heap consumes more memory but could be faster for sub-allocation. Optional parameter. When 0 is specified, the API will automatically set the preferred - resource heap size to the default value of 4MB. + resource heap size to be a multiple of minimum resource heap size allowed by D3D12. */ uint64_t PreferredResourceHeapSize; @@ -309,6 +309,23 @@ namespace gpgmm { namespace d3d12 { fragmentation limit to 1/8th the resource heap size. */ double MemoryFragmentationLimit; + + /** \brief Memory growth factor, expressed as a multiplier of the resource heap size + that will monotonically increase. + + A factor value of 1.0 specifies no growth, where the resource heap size is always determined + by other limits or constraints. If no factor gets specified (or a value less than 1 is + specified), GPGMM will allocate a resource heap size with enough space to fit exactly one + resource. + + Memory growth avoids the need to specify |PreferredResourceHeapSize|, which + especially helps in situations where the resource size cannot be predicted (e.g. + user-defined), by allowing the resource heap size to gradually increase in size + per demand to achieve a balance of memory usage and performance. + + Optional parameter. When 0 is specified, the default of 1.25 is used (or 25% growth). + */ + double MemoryGrowthFactor; }; /** \enum ALLOCATION_FLAGS diff --git a/src/tests/end2end/D3D12ResourceAllocatorTests.cpp b/src/tests/end2end/D3D12ResourceAllocatorTests.cpp index 29f810200..39f4b6b53 100644 --- a/src/tests/end2end/D3D12ResourceAllocatorTests.cpp +++ b/src/tests/end2end/D3D12ResourceAllocatorTests.cpp @@ -13,7 +13,6 @@ // limitations under the License. 
#include "gpgmm/d3d12/BackendD3D12.h" -#include "gpgmm/d3d12/DefaultsD3D12.h" #include "gpgmm/d3d12/ErrorD3D12.h" #include "gpgmm/utils/Math.h" #include "tests/D3D12Test.h" @@ -25,6 +24,8 @@ using namespace gpgmm::d3d12; +static constexpr uint64_t kDefaultBufferSize = 4ll * 1024ll * 1024ll; // 4MB + class D3D12ResourceAllocatorTests : public D3D12TestBase, public ::testing::Test { protected: void SetUp() override { @@ -76,8 +77,8 @@ TEST_F(D3D12ResourceAllocatorTests, CreateAllocator) { // heap size should always fail. { ALLOCATOR_DESC desc = CreateBasicAllocatorDesc(); - desc.PreferredResourceHeapSize = kDefaultPreferredResourceHeapSize; - desc.MaxResourceHeapSize = kDefaultPreferredResourceHeapSize / 2; + desc.PreferredResourceHeapSize = kDefaultBufferSize; + desc.MaxResourceHeapSize = kDefaultBufferSize / 2; ComPtr resourceAllocator; ASSERT_FAILED(ResourceAllocator::CreateAllocator(desc, &resourceAllocator)); @@ -117,17 +118,17 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBuffer) { // Creating a resource without allocation should always fail. { - ASSERT_FAILED(resourceAllocator->CreateResource( - {}, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), - D3D12_RESOURCE_STATE_COMMON, nullptr, nullptr)); + ASSERT_FAILED( + resourceAllocator->CreateResource({}, CreateBasicBufferDesc(kDefaultBufferSize), + D3D12_RESOURCE_STATE_COMMON, nullptr, nullptr)); } // Using the min resource heap size should always succeed. 
{ ComPtr allocation; - ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - {}, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), - D3D12_RESOURCE_STATE_COMMON, nullptr, &allocation)); + ASSERT_SUCCEEDED( + resourceAllocator->CreateResource({}, CreateBasicBufferDesc(kDefaultBufferSize), + D3D12_RESOURCE_STATE_COMMON, nullptr, &allocation)); ASSERT_NE(allocation, nullptr); ASSERT_NE(allocation->GetResource(), nullptr); } @@ -139,7 +140,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBuffer) { allocationDesc.HeapType = D3D12_HEAP_TYPE_UPLOAD; ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - allocationDesc, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), + allocationDesc, CreateBasicBufferDesc(kDefaultBufferSize), D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, &allocation)); ASSERT_NE(allocation, nullptr); ASSERT_NE(allocation->GetResource(), nullptr); @@ -154,7 +155,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBuffer) { ComPtr allocation; ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - allocationDesc, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), + allocationDesc, CreateBasicBufferDesc(kDefaultBufferSize), D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, &allocation)); ASSERT_NE(allocation, nullptr); } @@ -164,7 +165,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBuffer) { ComPtr allocation; ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - allocationDesc, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), + allocationDesc, CreateBasicBufferDesc(kDefaultBufferSize), D3D12_RESOURCE_STATE_COPY_DEST, nullptr, &allocation)); ASSERT_NE(allocation, nullptr); } @@ -174,8 +175,8 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBuffer) { ComPtr allocation; ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - allocationDesc, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), - D3D12_RESOURCE_STATE_COMMON, nullptr, &allocation)); + allocationDesc, CreateBasicBufferDesc(kDefaultBufferSize), D3D12_RESOURCE_STATE_COMMON, + nullptr, 
&allocation)); ASSERT_NE(allocation, nullptr); } { @@ -184,8 +185,8 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBuffer) { ComPtr allocation; ASSERT_FAILED(resourceAllocator->CreateResource( - allocationDesc, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), - D3D12_RESOURCE_STATE_COMMON, nullptr, &allocation)); + allocationDesc, CreateBasicBufferDesc(kDefaultBufferSize), D3D12_RESOURCE_STATE_COMMON, + nullptr, &allocation)); } // Creating a zero sized buffer is not allowed. @@ -247,14 +248,13 @@ TEST_F(D3D12ResourceAllocatorTests, ImportBuffer) { ASSERT_EQ(externalAllocation, nullptr); // Importing a buffer without returning the allocation should always fail. - ASSERT_FAILED(resourceAllocator->CreateResource( - {}, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), D3D12_RESOURCE_STATE_COMMON, - nullptr, nullptr)); + ASSERT_FAILED(resourceAllocator->CreateResource({}, CreateBasicBufferDesc(kDefaultBufferSize), + D3D12_RESOURCE_STATE_COMMON, nullptr, nullptr)); // Importing a buffer should always succeed. ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - {}, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), D3D12_RESOURCE_STATE_COMMON, - nullptr, &externalAllocation)); + {}, CreateBasicBufferDesc(kDefaultBufferSize), D3D12_RESOURCE_STATE_COMMON, nullptr, + &externalAllocation)); ASSERT_NE(externalAllocation, nullptr); ComPtr internalAllocation; @@ -273,7 +273,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferInvalid) { ASSERT_NE(resourceAllocator, nullptr); // Garbage buffer descriptor should always fail. 
- D3D12_RESOURCE_DESC badBufferDesc = CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize); + D3D12_RESOURCE_DESC badBufferDesc = CreateBasicBufferDesc(kDefaultBufferSize); badBufferDesc.Flags = static_cast(0xFF); ComPtr allocation; @@ -291,11 +291,11 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferAlwaysCommitted) { ASSERT_NE(resourceAllocator, nullptr); ComPtr allocation; - ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - {}, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), D3D12_RESOURCE_STATE_COMMON, - nullptr, &allocation)); + ASSERT_SUCCEEDED( + resourceAllocator->CreateResource({}, CreateBasicBufferDesc(kDefaultBufferSize), + D3D12_RESOURCE_STATE_COMMON, nullptr, &allocation)); ASSERT_NE(allocation, nullptr); - EXPECT_EQ(allocation->GetSize(), kDefaultPreferredResourceHeapSize); + EXPECT_EQ(allocation->GetSize(), kDefaultBufferSize); // Commmitted resources cannot be backed by a D3D12 heap. Heap* resourceHeap = allocation->GetMemory(); @@ -303,7 +303,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferAlwaysCommitted) { ASSERT_EQ(resourceHeap->GetHeap(), nullptr); // Commited resources must use all the memory allocated. 
- EXPECT_EQ(resourceAllocator->GetInfo().UsedMemoryUsage, kDefaultPreferredResourceHeapSize); + EXPECT_EQ(resourceAllocator->GetInfo().UsedMemoryUsage, kDefaultBufferSize); EXPECT_EQ(resourceAllocator->GetInfo().UsedBlockUsage, resourceAllocator->GetInfo().UsedMemoryUsage); } @@ -319,11 +319,11 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferNeverAllocate) { allocationDesc.Flags = ALLOCATION_FLAG_NEVER_ALLOCATE_MEMORY; ComPtr allocation; ASSERT_FAILED(resourceAllocator->CreateResource( - allocationDesc, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize + 1), - D3D12_RESOURCE_STATE_COMMON, nullptr, &allocation)); + allocationDesc, CreateBasicBufferDesc(kDefaultBufferSize + 1), D3D12_RESOURCE_STATE_COMMON, + nullptr, &allocation)); ASSERT_EQ(allocation, nullptr); - constexpr uint64_t bufferSize = kDefaultPreferredResourceHeapSize / 8; + constexpr uint64_t bufferSize = kDefaultBufferSize / 8; allocationDesc.Flags = ALLOCATION_FLAG_NONE; ComPtr allocationA; @@ -433,7 +433,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferNeverSubAllocated) { ResourceAllocator::CreateAllocator(CreateBasicAllocatorDesc(), &resourceAllocator)); ASSERT_NE(resourceAllocator, nullptr); - constexpr uint64_t bufferSize = kDefaultPreferredResourceHeapSize / 2; + constexpr uint64_t bufferSize = kDefaultBufferSize / 2; ALLOCATION_DESC allocationDesc = {}; allocationDesc.HeapType = D3D12_HEAP_TYPE_UPLOAD; @@ -459,9 +459,9 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferNeverPooled) { ALLOCATION_DESC baseAllocationDesc = {}; baseAllocationDesc.HeapType = D3D12_HEAP_TYPE_UPLOAD; - constexpr uint64_t bufferSize = kDefaultPreferredResourceHeapSize; + constexpr uint64_t bufferSize = kDefaultBufferSize; - // Create the first buffer. + // Create the first buffer of size A without recycling its memory. 
{ ComPtr allocation; ASSERT_SUCCEEDED(resourceAllocator->CreateResource( @@ -469,10 +469,9 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferNeverPooled) { D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, &allocation)); ASSERT_NE(allocation, nullptr); EXPECT_NE(allocation->GetResource(), nullptr); - EXPECT_EQ(allocation->GetMethod(), gpgmm::AllocationMethod::kStandalone); } - // Check the first buffer was not pool-allocated by creating it again. + // Check the first buffer of size A cannot be from recycled memory. { ALLOCATION_DESC allocationDesc = baseAllocationDesc; allocationDesc.Flags = ALLOCATION_FLAG_NEVER_ALLOCATE_MEMORY; @@ -483,7 +482,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferNeverPooled) { D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, &allocation)); } - // Create another buffer. + // Create another buffer of size B which cannot use recycled memory of size A. { ComPtr allocation; ASSERT_SUCCEEDED(resourceAllocator->CreateResource( @@ -491,10 +490,9 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferNeverPooled) { D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, &allocation)); ASSERT_NE(allocation, nullptr); EXPECT_NE(allocation->GetResource(), nullptr); - EXPECT_EQ(allocation->GetMethod(), gpgmm::AllocationMethod::kStandalone); } - // Check the second buffer was not pool-allocated by creating it again. + // Check the second buffer of size B cannot be from recycled memory. 
{ ALLOCATION_DESC allocationDesc = baseAllocationDesc; allocationDesc.Flags = ALLOCATION_FLAG_NEVER_ALLOCATE_MEMORY; @@ -507,7 +505,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferNeverPooled) { } TEST_F(D3D12ResourceAllocatorTests, CreateBufferPooled) { - constexpr uint64_t bufferSize = kDefaultPreferredResourceHeapSize; + constexpr uint64_t bufferSize = kDefaultBufferSize; ALLOCATOR_DESC allocatorDesc = CreateBasicAllocatorDesc(); @@ -625,14 +623,14 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferGetInfo) { ComPtr firstAllocation; ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - standaloneAllocationDesc, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), + standaloneAllocationDesc, CreateBasicBufferDesc(kDefaultBufferSize), D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, &firstAllocation)); ASSERT_NE(firstAllocation, nullptr); EXPECT_EQ(firstAllocation->GetMethod(), gpgmm::AllocationMethod::kStandalone); RESOURCE_ALLOCATOR_INFO info = resourceAllocator->GetInfo(); EXPECT_EQ(info.UsedMemoryCount, 1u); - EXPECT_EQ(info.UsedMemoryUsage, kDefaultPreferredResourceHeapSize); + EXPECT_EQ(info.UsedMemoryUsage, kDefaultBufferSize); } // Calculate info for two pooled standalone allocations. 
@@ -648,25 +646,25 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferGetInfo) { ComPtr firstAllocation; ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - standaloneAllocationDesc, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), + standaloneAllocationDesc, CreateBasicBufferDesc(kDefaultBufferSize), D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, &firstAllocation)); ASSERT_NE(firstAllocation, nullptr); EXPECT_EQ(firstAllocation->GetMethod(), gpgmm::AllocationMethod::kStandalone); RESOURCE_ALLOCATOR_INFO info = resourceAllocator->GetInfo(); EXPECT_EQ(info.UsedMemoryCount, 1u); - EXPECT_EQ(info.UsedMemoryUsage, kDefaultPreferredResourceHeapSize); + EXPECT_EQ(info.UsedMemoryUsage, kDefaultBufferSize); ComPtr secondAllocation; ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - standaloneAllocationDesc, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), + standaloneAllocationDesc, CreateBasicBufferDesc(kDefaultBufferSize), D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, &secondAllocation)); ASSERT_NE(secondAllocation, nullptr); EXPECT_EQ(secondAllocation->GetMethod(), gpgmm::AllocationMethod::kStandalone); info = resourceAllocator->GetInfo(); EXPECT_EQ(info.UsedMemoryCount, 2u); - EXPECT_EQ(info.UsedMemoryUsage, kDefaultPreferredResourceHeapSize * 2); + EXPECT_EQ(info.UsedMemoryUsage, kDefaultBufferSize * 2); } // Calculate info for two sub-allocations. 
@@ -676,7 +674,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferGetInfo) { ResourceAllocator::CreateAllocator(CreateBasicAllocatorDesc(), &resourceAllocator)); ASSERT_NE(resourceAllocator, nullptr); - constexpr uint64_t kBufferSize = kDefaultPreferredResourceHeapSize / 8; + constexpr uint64_t kBufferSize = kDefaultBufferSize / 8; ComPtr firstAllocation; ASSERT_SUCCEEDED(resourceAllocator->CreateResource({}, CreateBasicBufferDesc(kBufferSize), D3D12_RESOURCE_STATE_GENERIC_READ, @@ -690,7 +688,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferGetInfo) { RESOURCE_ALLOCATOR_INFO info = resourceAllocator->GetInfo(); EXPECT_EQ(info.UsedMemoryCount, 1u); - EXPECT_EQ(info.UsedMemoryUsage, kDefaultPreferredResourceHeapSize); + EXPECT_EQ(info.UsedMemoryUsage, kDefaultBufferSize); EXPECT_EQ(info.UsedBlockCount, 1u); EXPECT_GE(info.UsedBlockUsage, kBufferSize); @@ -703,7 +701,7 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferGetInfo) { info = resourceAllocator->GetInfo(); EXPECT_GE(info.UsedMemoryCount, 1u); - EXPECT_GE(info.UsedMemoryUsage, kDefaultPreferredResourceHeapSize); + EXPECT_GE(info.UsedMemoryUsage, kDefaultBufferSize); EXPECT_EQ(info.UsedBlockCount, 2u); EXPECT_GE(info.UsedBlockUsage, kBufferSize * 2); } @@ -852,8 +850,8 @@ TEST_F(D3D12ResourceAllocatorTests, CreateBufferManyThreaded) { threads[threadIdx] = std::thread([&]() { ComPtr allocation; ASSERT_SUCCEEDED(resourceAllocator->CreateResource( - {}, CreateBasicBufferDesc(kDefaultPreferredResourceHeapSize), - D3D12_RESOURCE_STATE_COMMON, nullptr, &allocation)); + {}, CreateBasicBufferDesc(kDefaultBufferSize), D3D12_RESOURCE_STATE_COMMON, nullptr, + &allocation)); ASSERT_NE(allocation, nullptr); }); } diff --git a/src/tests/unittests/SlabMemoryAllocatorTests.cpp b/src/tests/unittests/SlabMemoryAllocatorTests.cpp index bcb25acfc..4e9ddc0e9 100644 --- a/src/tests/unittests/SlabMemoryAllocatorTests.cpp +++ b/src/tests/unittests/SlabMemoryAllocatorTests.cpp @@ -29,6 +29,7 @@ using namespace gpgmm; static 
constexpr uint64_t kDefaultSlabSize = 128u; static constexpr uint64_t kDefaultSlabAlignment = 1u; static constexpr double kDefaultSlabFragmentationLimit = 0.125; +static constexpr double kNoSlabGrowthFactor = 1.0; static constexpr bool kDefaultPrefetchSlab = false; // Verify allocation in a single slab. @@ -42,7 +43,8 @@ TEST(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kMaxSlabSize = 512; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - kDefaultPrefetchSlab, dummyMemoryAllocator.get()); + kDefaultPrefetchSlab, kNoSlabGrowthFactor, + dummyMemoryAllocator.get()); ASSERT_EQ(allocator.TryAllocateMemory(kBlockSize * 2, 1, false, false, false), nullptr); } @@ -53,7 +55,8 @@ TEST(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kMaxSlabSize = 512; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - kDefaultPrefetchSlab, dummyMemoryAllocator.get()); + kDefaultPrefetchSlab, kNoSlabGrowthFactor, + dummyMemoryAllocator.get()); ASSERT_EQ(allocator.TryAllocateMemory(kMaxSlabSize, 1, false, false, false), nullptr); ASSERT_EQ(allocator.TryAllocateMemory(kMaxSlabSize - 1, 1, false, false, false), nullptr); @@ -66,7 +69,7 @@ TEST(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kMaxSlabSize = kBlockSize; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, - dummyMemoryAllocator.get()); + kNoSlabGrowthFactor, dummyMemoryAllocator.get()); std::unique_ptr allocation = allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); @@ -85,7 +88,7 @@ TEST(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kSlabSize = 0; // deduce slab size from allocation size. 
SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, - dummyMemoryAllocator.get()); + kNoSlabGrowthFactor, dummyMemoryAllocator.get()); // Max allocation cannot be more than 1/8th the max slab size or 4 bytes. // Since a 10 byte allocation requires a 128 byte slab, allocation should always fail. @@ -110,7 +113,7 @@ TEST(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kMaxSlabSize = 128; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, - dummyMemoryAllocator.get()); + kNoSlabGrowthFactor, dummyMemoryAllocator.get()); std::unique_ptr allocation = allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); @@ -128,7 +131,7 @@ TEST(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kMaxSlabSize = 128; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, - dummyMemoryAllocator.get()); + kNoSlabGrowthFactor, dummyMemoryAllocator.get()); std::unique_ptr allocation = allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); @@ -145,7 +148,8 @@ TEST(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kMaxSlabSize = 512; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - kDefaultPrefetchSlab, dummyMemoryAllocator.get()); + kDefaultPrefetchSlab, kNoSlabGrowthFactor, + dummyMemoryAllocator.get()); EXPECT_EQ(allocator.TryAllocateMemory(kBlockSize, 1, true, false, false), nullptr); EXPECT_EQ(allocator.TryAllocateMemory(kBlockSize / 2, 1, true, false, false), nullptr); @@ -165,7 +169,8 @@ TEST(SlabMemoryAllocatorTests, MultipleSlabs) { SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, /*slabSize*/ kBlockSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - kDefaultPrefetchSlab, 
dummyMemoryAllocator.get()); + kDefaultPrefetchSlab, kNoSlabGrowthFactor, + dummyMemoryAllocator.get()); const uint64_t kNumOfSlabs = 12; std::vector> allocations = {}; for (uint32_t slabi = 0; slabi < kNumOfSlabs; slabi++) { @@ -191,7 +196,8 @@ TEST(SlabMemoryAllocatorTests, MultipleSlabs) { SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - kDefaultPrefetchSlab, dummyMemoryAllocator.get()); + kDefaultPrefetchSlab, kNoSlabGrowthFactor, + dummyMemoryAllocator.get()); // Fill up exactly two 128B slabs. std::vector> allocations = {}; for (uint32_t blocki = 0; blocki < (kDefaultSlabSize * 2 / kBlockSize); blocki++) { @@ -217,7 +223,8 @@ TEST(SlabMemoryAllocatorTests, MultipleSlabs) { constexpr uint64_t kMaxSlabSize = 512; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - kDefaultPrefetchSlab, dummyMemoryAllocator.get()); + kDefaultPrefetchSlab, kNoSlabGrowthFactor, + dummyMemoryAllocator.get()); // Both allocation A and B go in Slab A, which will become full. 
std::unique_ptr allocationAinSlabA = @@ -304,7 +311,7 @@ TEST(SlabMemoryAllocatorTests, AllocationOverflow) { constexpr uint64_t kMaxSlabSize = 512; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, - dummyMemoryAllocator.get()); + kNoSlabGrowthFactor, dummyMemoryAllocator.get()); constexpr uint64_t largeBlock = (1ull << 63) + 1; std::unique_ptr invalidAllocation = @@ -322,7 +329,7 @@ TEST(SlabMemoryAllocatorTests, ReuseSlabs) { constexpr uint64_t kMaxSlabSize = 512; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, - poolAllocator.get()); + kNoSlabGrowthFactor, poolAllocator.get()); std::set slabMemory = {}; std::vector> allocations = {}; @@ -365,7 +372,8 @@ TEST(SlabMemoryAllocatorTests, GetInfo) { constexpr uint64_t kMaxSlabSize = 512; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - kDefaultPrefetchSlab, dummyMemoryAllocator.get()); + kDefaultPrefetchSlab, kNoSlabGrowthFactor, + dummyMemoryAllocator.get()); std::unique_ptr allocation = allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); @@ -399,7 +407,8 @@ TEST(SlabMemoryAllocatorTests, GetInfo) { constexpr uint64_t kMaxSlabSize = 512; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - kDefaultPrefetchSlab, poolAllocator.get()); + kDefaultPrefetchSlab, kNoSlabGrowthFactor, + poolAllocator.get()); std::unique_ptr allocation = allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); @@ -421,13 +430,213 @@ TEST(SlabMemoryAllocatorTests, GetInfo) { } } +// Grow slabs one after another below kMaxSlabSize. +TEST(SlabMemoryAllocatorTests, SlabGrowth) { + // Start from kMinSlabSize == kBlockSize. 
+ { + constexpr uint64_t kBlockSize = 32; + constexpr uint64_t kMaxSlabSize = 512; + constexpr uint64_t kMinSlabSize = kBlockSize; + + DummyMemoryAllocator dummyAllocator; + SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kMinSlabSize, kDefaultSlabAlignment, + kDefaultSlabFragmentationLimit, false, + /*slabGrowthFactor*/ 2, &dummyAllocator); + + // Slab A contains 1 allocation. + std::unique_ptr allocationAInSlabA = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationAInSlabA->GetSize(), kBlockSize); + EXPECT_EQ(allocationAInSlabA->GetMemory()->GetSize(), kBlockSize); + + // Slab B grows 2x and contains 2 allocations. + std::unique_ptr allocationAInSlabB = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationAInSlabB->GetSize(), kBlockSize); + EXPECT_EQ(allocationAInSlabB->GetMemory()->GetSize(), kBlockSize * 2); + + std::unique_ptr allocationBInSlabB = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationBInSlabB->GetSize(), kBlockSize); + EXPECT_EQ(allocationBInSlabB->GetMemory()->GetSize(), kBlockSize * 2); + + // Slab C grows 2x and contains 4 allocations. 
+ std::unique_ptr allocationAInSlabC = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationAInSlabC->GetSize(), kBlockSize); + EXPECT_EQ(allocationAInSlabC->GetMemory()->GetSize(), kBlockSize * 4); + + std::unique_ptr allocationBInSlabC = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationBInSlabC->GetSize(), kBlockSize); + EXPECT_EQ(allocationBInSlabC->GetMemory()->GetSize(), kBlockSize * 4); + + std::unique_ptr allocationCInSlabC = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationCInSlabC->GetSize(), kBlockSize); + EXPECT_EQ(allocationCInSlabC->GetMemory()->GetSize(), kBlockSize * 4); + + std::unique_ptr allocationDInSlabC = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationDInSlabC->GetSize(), kBlockSize); + EXPECT_EQ(allocationDInSlabC->GetMemory()->GetSize(), kBlockSize * 4); + + allocator.DeallocateMemory(std::move(allocationDInSlabC)); + allocator.DeallocateMemory(std::move(allocationCInSlabC)); + allocator.DeallocateMemory(std::move(allocationBInSlabC)); + allocator.DeallocateMemory(std::move(allocationAInSlabC)); + + allocator.DeallocateMemory(std::move(allocationBInSlabB)); + allocator.DeallocateMemory(std::move(allocationAInSlabB)); + + allocator.DeallocateMemory(std::move(allocationAInSlabA)); + + EXPECT_EQ(allocator.GetInfo().UsedMemoryUsage, 0u); + } + + // Start from a kMinSlabSize > kBlockSize. + { + constexpr uint64_t kBlockSize = 16; + constexpr uint64_t kMinSlabSize = 32; + constexpr uint64_t kMaxSlabSize = 64; + + DummyMemoryAllocator dummyAllocator; + SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kMinSlabSize, kDefaultSlabAlignment, + kDefaultSlabFragmentationLimit, false, + /*slabGrowthFactor*/ 2, &dummyAllocator); + + // Slab A grows 1x kMinSlabSize. 
+ std::unique_ptr allocationAInSlabA = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationAInSlabA->GetSize(), kBlockSize); + EXPECT_EQ(allocationAInSlabA->GetMemory()->GetSize(), kMinSlabSize); + + std::unique_ptr allocationBInSlabA = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationBInSlabA->GetSize(), kBlockSize); + EXPECT_EQ(allocationBInSlabA->GetMemory()->GetSize(), kMinSlabSize); + + EXPECT_EQ(allocationAInSlabA->GetMemory(), allocationBInSlabA->GetMemory()); + + // Slab B grows 2x kMinSlabSize. + std::unique_ptr allocationAInSlabB = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationAInSlabB->GetSize(), kBlockSize); + EXPECT_EQ(allocationAInSlabB->GetMemory()->GetSize(), kMinSlabSize * 2); + + EXPECT_NE(allocationBInSlabA->GetMemory(), allocationAInSlabB->GetMemory()); + + allocator.DeallocateMemory(std::move(allocationAInSlabB)); + + allocator.DeallocateMemory(std::move(allocationBInSlabA)); + allocator.DeallocateMemory(std::move(allocationAInSlabA)); + + EXPECT_EQ(allocator.GetInfo().UsedMemoryUsage, 0u); + } +} + +// Grow slabs until kMaxSlabSize is reached. +TEST(SlabMemoryAllocatorTests, SlabGrowthLimit) { + // Start from a kMinSlabSize > kBlockSize. + { + constexpr uint64_t kBlockSize = 16; + constexpr uint64_t kMinSlabSize = 32; + constexpr uint64_t kMaxSlabSize = 64; + + DummyMemoryAllocator dummyAllocator; + SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kMinSlabSize, kDefaultSlabAlignment, + kDefaultSlabFragmentationLimit, false, + /*slabGrowthFactor*/ 2, &dummyAllocator); + + // Slab A grows 1x kMinSlabSize. 
+ std::unique_ptr allocationAInSlabA = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationAInSlabA->GetSize(), kBlockSize); + EXPECT_EQ(allocationAInSlabA->GetMemory()->GetSize(), kMinSlabSize); + + std::unique_ptr allocationBInSlabA = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationBInSlabA->GetSize(), kBlockSize); + EXPECT_EQ(allocationBInSlabA->GetMemory()->GetSize(), kMinSlabSize); + + EXPECT_EQ(allocationAInSlabA->GetMemory(), allocationBInSlabA->GetMemory()); + + // Slab B grows 2x kMinSlabSize. + std::unique_ptr allocationAInSlabB = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationAInSlabB->GetSize(), kBlockSize); + EXPECT_EQ(allocationAInSlabB->GetMemory()->GetSize(), kMinSlabSize * 2); + + EXPECT_NE(allocationBInSlabA->GetMemory(), allocationAInSlabB->GetMemory()); + + allocator.DeallocateMemory(std::move(allocationAInSlabB)); + + allocator.DeallocateMemory(std::move(allocationBInSlabA)); + allocator.DeallocateMemory(std::move(allocationAInSlabA)); + } + + // Start from a kMinSlabSize == kBlockSize. + { + constexpr uint64_t kBlockSize = 32; + constexpr uint64_t kMaxSlabSize = 64; + constexpr uint64_t kMinSlabSize = kBlockSize; + + DummyMemoryAllocator dummyAllocator; + SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kMinSlabSize, kDefaultSlabAlignment, + kDefaultSlabFragmentationLimit, false, + /*slabGrowthFactor*/ 2, &dummyAllocator); + + // Slab A grows 1x kBlockSize. + std::unique_ptr allocationAInSlabA = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationAInSlabA->GetSize(), kBlockSize); + EXPECT_EQ(allocationAInSlabA->GetMemory()->GetSize(), kBlockSize); + + // Slab B grows 2x kBlockSize. 
+ std::unique_ptr allocationAInSlabB = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationAInSlabB->GetSize(), kBlockSize); + EXPECT_EQ(allocationAInSlabB->GetMemory()->GetSize(), kBlockSize * 2); + + EXPECT_NE(allocationAInSlabA->GetMemory(), allocationAInSlabB->GetMemory()); + + std::unique_ptr allocationBInSlabB = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationBInSlabB->GetSize(), kBlockSize); + EXPECT_EQ(allocationBInSlabB->GetMemory()->GetSize(), kBlockSize * 2); + + // Slab C STILL grows 2x kBlockSize. + std::unique_ptr allocationAInSlabC = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationAInSlabC->GetSize(), kBlockSize); + EXPECT_EQ(allocationAInSlabC->GetMemory()->GetSize(), kBlockSize * 2); + + EXPECT_NE(allocationBInSlabB->GetMemory(), allocationAInSlabC->GetMemory()); + + std::unique_ptr allocationBInSlabC = + allocator.TryAllocateMemory(kBlockSize, 1, false, false, false); + EXPECT_EQ(allocationBInSlabC->GetSize(), kBlockSize); + EXPECT_EQ(allocationBInSlabC->GetMemory()->GetSize(), kBlockSize * 2); + + allocator.DeallocateMemory(std::move(allocationBInSlabC)); + allocator.DeallocateMemory(std::move(allocationAInSlabC)); + + allocator.DeallocateMemory(std::move(allocationBInSlabB)); + allocator.DeallocateMemory(std::move(allocationAInSlabB)); + + allocator.DeallocateMemory(std::move(allocationAInSlabA)); + + EXPECT_EQ(allocator.GetInfo().UsedMemoryUsage, 0u); + } +} + TEST(SlabCacheAllocatorTests, SingleSlabMultipleSize) { constexpr uint64_t kMinBlockSize = 4; constexpr uint64_t kMaxSlabSize = 256; constexpr uint64_t kSlabSize = 0; // deduce slab size from allocation size. 
SlabCacheAllocator allocator(kMinBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, - std::make_unique()); + kNoSlabGrowthFactor, std::make_unique()); // Verify requesting an allocation without memory will not return a valid allocation. { @@ -442,7 +651,7 @@ TEST(SlabCacheAllocatorTests, MultipleSlabsSameSize) { constexpr uint64_t kSlabSize = 0; // deduce slab size from allocation size. SlabCacheAllocator allocator(kMinBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, - std::make_unique()); + kNoSlabGrowthFactor, std::make_unique()); std::unique_ptr firstAllocation = allocator.TryAllocateMemory(22, 1, false, false, false); @@ -473,7 +682,7 @@ TEST(SlabCacheAllocatorTests, MultipleSlabsVariableSizes) { constexpr uint64_t kSlabSize = 0; // deduce slab size from allocation size. SlabCacheAllocator allocator(kMinBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, - std::make_unique()); + kNoSlabGrowthFactor, std::make_unique()); { constexpr uint64_t allocationSize = 22; std::unique_ptr allocation = @@ -520,6 +729,7 @@ TEST(SlabCacheAllocatorTests, SingleSlabInBuddy) { constexpr uint64_t kSlabSize = kDefaultSlabSize / 8; SlabCacheAllocator allocator(kMinBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, + kNoSlabGrowthFactor, std::make_unique( kMaxBlockSize, kDefaultSlabSize, kDefaultSlabAlignment, std::make_unique())); @@ -543,6 +753,7 @@ TEST(SlabCacheAllocatorTests, MultipleSlabsInBuddy) { constexpr uint64_t kSlabSize = kDefaultSlabSize / 8; SlabCacheAllocator allocator(kMinBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, + kNoSlabGrowthFactor, std::make_unique( kMaxBlockSize, kDefaultSlabSize, kDefaultSlabAlignment, std::make_unique())); @@ -620,7 +831,7 @@ 
TEST(SlabCacheAllocatorTests, GetInfo) { constexpr uint64_t kMaxSlabSize = 512; SlabCacheAllocator allocator(kMinBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - kDefaultPrefetchSlab, + kDefaultPrefetchSlab, kNoSlabGrowthFactor, std::make_unique()); std::unique_ptr allocation = @@ -652,7 +863,7 @@ TEST(SlabCacheAllocatorTests, GetInfo) { constexpr uint64_t kMaxSlabSize = 512; SlabCacheAllocator allocator(kMinBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - kDefaultPrefetchSlab, + kDefaultPrefetchSlab, kNoSlabGrowthFactor, std::make_unique( std::make_unique(), &pool)); @@ -685,6 +896,7 @@ TEST(SlabCacheAllocatorTests, GetInfo) { constexpr uint64_t kSlabSize = kDefaultSlabSize / 8; SlabCacheAllocator allocator(kMinBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kDefaultPrefetchSlab, + kNoSlabGrowthFactor, std::make_unique( kMaxBlockSize, kDefaultSlabSize, kDefaultSlabAlignment, std::make_unique())); @@ -712,14 +924,15 @@ TEST(SlabCacheAllocatorTests, GetInfo) { } // Pre-fetch |kNumOfSlabs| slabs worth of sub-allocations of various sizes. -TEST(SlabCacheAllocatorTests, PrefetchSlabs) { +TEST(SlabCacheAllocatorTests, SlabPrefetch) { constexpr uint64_t kBlockSize = 32; constexpr uint64_t kMinBlockSize = 4; constexpr uint64_t kMaxSlabSize = 512; SlabCacheAllocator allocator(kMinBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, - /*prefetchSlab*/ true, std::make_unique()); + /*prefetchSlab*/ true, kNoSlabGrowthFactor, + std::make_unique()); constexpr uint64_t kNumOfSlabs = 10u; std::vector> allocations = {};