diff --git a/src/gpgmm/common/BuddyMemoryAllocator.cpp b/src/gpgmm/common/BuddyMemoryAllocator.cpp index f203f0c1..7f9b2d14 100644 --- a/src/gpgmm/common/BuddyMemoryAllocator.cpp +++ b/src/gpgmm/common/BuddyMemoryAllocator.cpp @@ -24,7 +24,7 @@ namespace gpgmm { BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t systemSize, uint64_t memorySize, uint64_t memoryAlignment, - std::unique_ptr memoryAllocator) + ScopedRef memoryAllocator) : MemoryAllocatorBase(std::move(memoryAllocator)), mMemorySize(memorySize), mMemoryAlignment(memoryAlignment), diff --git a/src/gpgmm/common/BuddyMemoryAllocator.h b/src/gpgmm/common/BuddyMemoryAllocator.h index 41416e56..a6fca625 100644 --- a/src/gpgmm/common/BuddyMemoryAllocator.h +++ b/src/gpgmm/common/BuddyMemoryAllocator.h @@ -40,7 +40,7 @@ namespace gpgmm { BuddyMemoryAllocator(uint64_t systemSize, uint64_t memorySize, uint64_t memoryAlignment, - std::unique_ptr memoryAllocator); + ScopedRef memoryAllocator); // MemoryAllocatorBase interface ResultOrError> TryAllocateMemory( diff --git a/src/gpgmm/common/DedicatedMemoryAllocator.cpp b/src/gpgmm/common/DedicatedMemoryAllocator.cpp index 2091acc3..f0ffc2f1 100644 --- a/src/gpgmm/common/DedicatedMemoryAllocator.cpp +++ b/src/gpgmm/common/DedicatedMemoryAllocator.cpp @@ -22,7 +22,7 @@ namespace gpgmm { DedicatedMemoryAllocator::DedicatedMemoryAllocator( - std::unique_ptr memoryAllocator, + ScopedRef memoryAllocator, uint64_t memoryAlignment) : MemoryAllocatorBase(std::move(memoryAllocator)), mMemoryAlignment(memoryAlignment) { } diff --git a/src/gpgmm/common/DedicatedMemoryAllocator.h b/src/gpgmm/common/DedicatedMemoryAllocator.h index 78250540..f6a72bc9 100644 --- a/src/gpgmm/common/DedicatedMemoryAllocator.h +++ b/src/gpgmm/common/DedicatedMemoryAllocator.h @@ -25,7 +25,7 @@ namespace gpgmm { // memory to be tracked. 
class DedicatedMemoryAllocator final : public MemoryAllocatorBase { public: - DedicatedMemoryAllocator(std::unique_ptr memoryAllocator, + DedicatedMemoryAllocator(ScopedRef memoryAllocator, uint64_t memoryAlignment); // MemoryAllocatorBase interface diff --git a/src/gpgmm/common/MemoryAllocator.cpp b/src/gpgmm/common/MemoryAllocator.cpp index 69842170..0f399ff9 100644 --- a/src/gpgmm/common/MemoryAllocator.cpp +++ b/src/gpgmm/common/MemoryAllocator.cpp @@ -90,11 +90,11 @@ namespace gpgmm { // MemoryAllocatorBase - MemoryAllocatorBase::MemoryAllocatorBase() { + MemoryAllocatorBase::MemoryAllocatorBase() : RefCounted(0) { } - MemoryAllocatorBase::MemoryAllocatorBase(std::unique_ptr next) { - InsertIntoChain(std::move(next)); + MemoryAllocatorBase::MemoryAllocatorBase(ScopedRef next) : RefCounted(0) { + InsertIntoChain(std::move(next)); } MemoryAllocatorBase::~MemoryAllocatorBase() { @@ -110,7 +110,7 @@ namespace gpgmm { // Deletes adjacent node recursively (post-order). if (mNext != nullptr) { - SafeDelete(mNext); + mNext = nullptr; } if (IsInList()) { @@ -195,17 +195,17 @@ namespace gpgmm { } MemoryAllocatorBase* MemoryAllocatorBase::GetNextInChain() const { - return mNext; + return mNext.Get(); } MemoryAllocatorBase* MemoryAllocatorBase::GetParent() const { return mParent; } - void MemoryAllocatorBase::InsertIntoChain(std::unique_ptr next) { + void MemoryAllocatorBase::InsertIntoChain(ScopedRef next) { ASSERT(next != nullptr); next->mParent = this->value(); - mNext = next.release(); + mNext = std::move(next); } } // namespace gpgmm diff --git a/src/gpgmm/common/MemoryAllocator.h b/src/gpgmm/common/MemoryAllocator.h index 246247cd..a242bddd 100644 --- a/src/gpgmm/common/MemoryAllocator.h +++ b/src/gpgmm/common/MemoryAllocator.h @@ -25,6 +25,7 @@ #include "gpgmm/utils/Limits.h" #include "gpgmm/utils/LinkedList.h" #include "gpgmm/utils/Log.h" +#include "gpgmm/utils/RefCount.h" #include #include @@ -140,13 +141,15 @@ namespace gpgmm { // parent) and the next MemoryAllocatorBase (or child) 
form a one-way edge. This allows the // first-order MemoryAllocatorBase to sub-allocate from larger blocks provided by the // second-order MemoryAllocatorBase and so on. - class MemoryAllocatorBase : public ObjectBase, public LinkNode { + class MemoryAllocatorBase : public ObjectBase, + public LinkNode, + public RefCounted { public: // Constructs a standalone MemoryAllocatorBase. MemoryAllocatorBase(); // Constructs a MemoryAllocatorBase that also owns a (child) allocator. - explicit MemoryAllocatorBase(std::unique_ptr next); + explicit MemoryAllocatorBase(ScopedRef next); virtual ~MemoryAllocatorBase() override; @@ -252,14 +255,14 @@ namespace gpgmm { nullptr, memory, kInvalidOffset, AllocationMethod::kUndefined, block, requestSize); } - void InsertIntoChain(std::unique_ptr next); + void InsertIntoChain(ScopedRef next); MemoryAllocatorStats mStats = {}; mutable std::mutex mMutex; private: - MemoryAllocatorBase* mNext = nullptr; + ScopedRef mNext; MemoryAllocatorBase* mParent = nullptr; }; diff --git a/src/gpgmm/common/PooledMemoryAllocator.cpp b/src/gpgmm/common/PooledMemoryAllocator.cpp index 5777634a..5b4b7e94 100644 --- a/src/gpgmm/common/PooledMemoryAllocator.cpp +++ b/src/gpgmm/common/PooledMemoryAllocator.cpp @@ -21,10 +21,9 @@ namespace gpgmm { - PooledMemoryAllocator::PooledMemoryAllocator( - uint64_t memorySize, - uint64_t memoryAlignment, - std::unique_ptr memoryAllocator) + PooledMemoryAllocator::PooledMemoryAllocator(uint64_t memorySize, + uint64_t memoryAlignment, + ScopedRef memoryAllocator) : MemoryAllocatorBase(std::move(memoryAllocator)), mPool(new LIFOMemoryPool(memorySize)), mMemoryAlignment(memoryAlignment) { diff --git a/src/gpgmm/common/PooledMemoryAllocator.h b/src/gpgmm/common/PooledMemoryAllocator.h index d90dae35..8f68709f 100644 --- a/src/gpgmm/common/PooledMemoryAllocator.h +++ b/src/gpgmm/common/PooledMemoryAllocator.h @@ -26,7 +26,7 @@ namespace gpgmm { public: PooledMemoryAllocator(uint64_t memorySize, uint64_t memoryAlignment, - 
std::unique_ptr memoryAllocator); + ScopedRef memoryAllocator); ~PooledMemoryAllocator() override; // MemoryAllocatorBase interface diff --git a/src/gpgmm/common/SegmentedMemoryAllocator.cpp b/src/gpgmm/common/SegmentedMemoryAllocator.cpp index a26be18f..0225f1b3 100644 --- a/src/gpgmm/common/SegmentedMemoryAllocator.cpp +++ b/src/gpgmm/common/SegmentedMemoryAllocator.cpp @@ -79,7 +79,7 @@ namespace gpgmm { // SegmentedMemoryAllocator SegmentedMemoryAllocator::SegmentedMemoryAllocator( - std::unique_ptr memoryAllocator, + ScopedRef memoryAllocator, uint64_t memoryAlignment) : MemoryAllocatorBase(std::move(memoryAllocator)), mMemoryAlignment(memoryAlignment) { } diff --git a/src/gpgmm/common/SegmentedMemoryAllocator.h b/src/gpgmm/common/SegmentedMemoryAllocator.h index 83fce8a7..9dd7d6c9 100644 --- a/src/gpgmm/common/SegmentedMemoryAllocator.h +++ b/src/gpgmm/common/SegmentedMemoryAllocator.h @@ -34,7 +34,7 @@ namespace gpgmm { // variable-size memory blocks. class SegmentedMemoryAllocator : public MemoryAllocatorBase { public: - SegmentedMemoryAllocator(std::unique_ptr memoryAllocator, + SegmentedMemoryAllocator(ScopedRef memoryAllocator, uint64_t memoryAlignment); ~SegmentedMemoryAllocator() override; diff --git a/src/gpgmm/common/SlabMemoryAllocator.cpp b/src/gpgmm/common/SlabMemoryAllocator.cpp index a493e851..58bcfb8f 100644 --- a/src/gpgmm/common/SlabMemoryAllocator.cpp +++ b/src/gpgmm/common/SlabMemoryAllocator.cpp @@ -492,7 +492,7 @@ namespace gpgmm { float slabFragmentationLimit, bool allowPrefetchSlab, float slabGrowthFactor, - std::unique_ptr memoryAllocator) + ScopedRef memoryAllocator) : MemoryAllocatorBase(std::move(memoryAllocator)), mMaxSlabSize(maxSlabSize), mMinSlabSize(minSlabSize), diff --git a/src/gpgmm/common/SlabMemoryAllocator.h b/src/gpgmm/common/SlabMemoryAllocator.h index d38359a3..e6f7c597 100644 --- a/src/gpgmm/common/SlabMemoryAllocator.h +++ b/src/gpgmm/common/SlabMemoryAllocator.h @@ -117,7 +117,7 @@ namespace gpgmm { float 
slabFragmentationLimit, bool allowSlabPrefetch, float slabGrowthFactor, - std::unique_ptr memoryAllocator); + ScopedRef memoryAllocator); ~SlabCacheAllocator() override; diff --git a/src/gpgmm/d3d12/ResourceAllocatorD3D12.cpp b/src/gpgmm/d3d12/ResourceAllocatorD3D12.cpp index 515882f8..b419d1e4 100644 --- a/src/gpgmm/d3d12/ResourceAllocatorD3D12.cpp +++ b/src/gpgmm/d3d12/ResourceAllocatorD3D12.cpp @@ -615,7 +615,7 @@ namespace gpgmm::d3d12 { GPGMM_TRACE_EVENT_OBJECT_NEW(this); if (descriptor.Flags & RESOURCE_ALLOCATOR_FLAG_NEVER_LEAK) { - mTrackingAllocator = std::make_unique(); + mTrackingAllocator = new ResourceAllocationTrackingAllocator(); } const bool isUMA = @@ -647,8 +647,7 @@ namespace gpgmm::d3d12 { mMSAAResourceAllocatorOfType[resourceHeapTypeIndex] = CreateResourceAllocator( descriptor, heapFlags, heapProperties, msaaHeapAlignment); } else { - mMSAAResourceAllocatorOfType[resourceHeapTypeIndex] = - std::make_unique(); + mMSAAResourceAllocatorOfType[resourceHeapTypeIndex] = new SentinelMemoryAllocator(); } // Dedicated allocators are used when sub-allocation cannot but heaps could still be @@ -665,7 +664,7 @@ namespace gpgmm::d3d12 { msaaHeapAlignment); } else { mMSAADedicatedResourceAllocatorOfType[resourceHeapTypeIndex] = - std::make_unique(); + new SentinelMemoryAllocator; } if (IsBuffersAllowed(heapFlags, mResourceHeapTier)) { @@ -673,8 +672,7 @@ namespace gpgmm::d3d12 { CreateSmallBufferAllocator(descriptor, heapFlags, heapProperties, heapAlignment, GetInitialResourceState(heapType)); } else { - mSmallBufferAllocatorOfType[resourceHeapTypeIndex] = - std::make_unique(); + mSmallBufferAllocatorOfType[resourceHeapTypeIndex] = new SentinelMemoryAllocator; } // Cache resource sizes commonly requested. 
@@ -696,19 +694,19 @@ namespace gpgmm::d3d12 { cacheRequest.Alignment = sizeInfo.Alignment; MemoryAllocatorBase* allocator = - mSmallBufferAllocatorOfType[resourceHeapTypeIndex].get(); + mSmallBufferAllocatorOfType[resourceHeapTypeIndex].Get(); if (cacheRequest.SizeInBytes <= allocator->GetMemorySize() && sizeInfo.Alignment == D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT) { allocator->TryAllocateMemory(cacheRequest); } - allocator = mResourceAllocatorOfType[resourceHeapTypeIndex].get(); + allocator = mResourceAllocatorOfType[resourceHeapTypeIndex].Get(); if (cacheRequest.SizeInBytes <= allocator->GetMemorySize() && sizeInfo.Alignment == D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT) { allocator->TryAllocateMemory(cacheRequest); } - allocator = mMSAAResourceAllocatorOfType[resourceHeapTypeIndex].get(); + allocator = mMSAAResourceAllocatorOfType[resourceHeapTypeIndex].Get(); if (cacheRequest.SizeInBytes <= allocator->GetMemorySize() && sizeInfo.Alignment == D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT) { allocator->TryAllocateMemory(cacheRequest); @@ -719,24 +717,24 @@ namespace gpgmm::d3d12 { } } - std::unique_ptr ResourceAllocator::CreatePoolAllocator( + ScopedRef ResourceAllocator::CreatePoolAllocator( RESOURCE_ALLOCATION_ALGORITHM algorithm, uint64_t memorySize, uint64_t memoryAlignment, bool isAlwaysOnDemand, - std::unique_ptr underlyingAllocator) { + ScopedRef underlyingAllocator) { if (isAlwaysOnDemand) { return underlyingAllocator; } switch (algorithm) { case RESOURCE_ALLOCATION_ALGORITHM_FIXED_POOL: { - return std::make_unique(memorySize, memoryAlignment, - std::move(underlyingAllocator)); + return new PooledMemoryAllocator(memorySize, memoryAlignment, + std::move(underlyingAllocator)); } case RESOURCE_ALLOCATION_ALGORITHM_SEGMENTED_POOL: { - return std::make_unique(std::move(underlyingAllocator), - memoryAlignment); + return new SegmentedMemoryAllocator(std::move(underlyingAllocator), + memoryAlignment); } default: { UNREACHABLE(); @@ -745,18 +743,18 @@ 
namespace gpgmm::d3d12 { } } - std::unique_ptr ResourceAllocator::CreateSubAllocator( + ScopedRef ResourceAllocator::CreateSubAllocator( RESOURCE_ALLOCATION_ALGORITHM algorithm, uint64_t memorySize, uint64_t memoryAlignment, float memoryFragmentationLimit, float memoryGrowthFactor, bool isPrefetchAllowed, - std::unique_ptr underlyingAllocator) { + ScopedRef underlyingAllocator) { switch (algorithm) { case RESOURCE_ALLOCATION_ALGORITHM_BUDDY_SYSTEM: { // System and memory size must be aligned at creation-time. - return std::make_unique( + return new BuddyMemoryAllocator( /*systemSize*/ LowerPowerOfTwo(mMaxResourceHeapSize), /*memorySize*/ UpperPowerOfTwo(memorySize), /*memoryAlignment*/ memoryAlignment, @@ -765,7 +763,7 @@ namespace gpgmm::d3d12 { case RESOURCE_ALLOCATION_ALGORITHM_SLAB: { // Min slab size is always equal to the memory size because the // slab allocator aligns the slab size at allocate-time. - return std::make_unique( + return new SlabCacheAllocator( /*maxSlabSize*/ LowerPowerOfTwo(mMaxResourceHeapSize), /*minSlabSize*/ memorySize, /*slabAlignment*/ memoryAlignment, @@ -775,7 +773,7 @@ namespace gpgmm::d3d12 { /*memoryAllocator*/ std::move(underlyingAllocator)); } case RESOURCE_ALLOCATION_ALGORITHM_DEDICATED: { - return std::make_unique( + return new DedicatedMemoryAllocator( /*memoryAllocator*/ std::move(underlyingAllocator), memoryAlignment); } default: { @@ -785,20 +783,18 @@ namespace gpgmm::d3d12 { } } - std::unique_ptr ResourceAllocator::CreateResourceAllocator( + ScopedRef ResourceAllocator::CreateResourceAllocator( const RESOURCE_ALLOCATOR_DESC& descriptor, D3D12_HEAP_FLAGS heapFlags, const D3D12_HEAP_PROPERTIES& heapProperties, uint64_t heapAlignment) { - std::unique_ptr resourceHeapAllocator = - std::make_unique(mResidencyManager.Get(), mDevice, - heapProperties, heapFlags, - mIsAlwaysCreatedInBudget); + ScopedRef resourceHeapAllocator(new ResourceHeapAllocator( + mResidencyManager.Get(), mDevice, heapProperties, heapFlags, 
mIsAlwaysCreatedInBudget)); const uint64_t heapSize = std::max(heapAlignment, AlignTo(descriptor.PreferredResourceHeapSize, heapAlignment)); - std::unique_ptr pooledOrNonPooledAllocator = + ScopedRef pooledOrNonPooledAllocator = CreatePoolAllocator(descriptor.PoolAlgorithm, heapSize, heapAlignment, (descriptor.Flags & RESOURCE_ALLOCATOR_FLAG_ALWAYS_ON_DEMAND), std::move(resourceHeapAllocator)); @@ -811,17 +807,16 @@ namespace gpgmm::d3d12 { std::move(pooledOrNonPooledAllocator)); } - std::unique_ptr ResourceAllocator::CreateSmallBufferAllocator( + ScopedRef ResourceAllocator::CreateSmallBufferAllocator( const RESOURCE_ALLOCATOR_DESC& descriptor, D3D12_HEAP_FLAGS heapFlags, const D3D12_HEAP_PROPERTIES& heapProperties, uint64_t heapAlignment, D3D12_RESOURCE_STATES initialResourceState) { - std::unique_ptr smallBufferOnlyAllocator = - std::make_unique(this, heapProperties, heapFlags, - D3D12_RESOURCE_FLAG_NONE, initialResourceState); + ScopedRef smallBufferOnlyAllocator(new BufferAllocator( + this, heapProperties, heapFlags, D3D12_RESOURCE_FLAG_NONE, initialResourceState)); - std::unique_ptr pooledOrNonPooledAllocator = + ScopedRef pooledOrNonPooledAllocator = CreatePoolAllocator(descriptor.PoolAlgorithm, heapAlignment, heapAlignment, (descriptor.Flags & RESOURCE_ALLOCATOR_FLAG_ALWAYS_ON_DEMAND), std::move(smallBufferOnlyAllocator)); @@ -850,13 +845,25 @@ namespace gpgmm::d3d12 { // Destroy allocators in the reverse order they were created so we can record delete events // before event tracer shutdown. 
- mSmallBufferAllocatorOfType = {}; + for (auto& allocator : mSmallBufferAllocatorOfType) { + allocator = nullptr; + } + + for (auto& allocator : mMSAADedicatedResourceAllocatorOfType) { + allocator = nullptr; + } - mMSAADedicatedResourceAllocatorOfType = {}; - mMSAAResourceAllocatorOfType = {}; + for (auto& allocator : mMSAAResourceAllocatorOfType) { + allocator = nullptr; + } + + for (auto& allocator : mResourceAllocatorOfType) { + allocator = nullptr; + } - mResourceAllocatorOfType = {}; - mDedicatedResourceAllocatorOfType = {}; + for (auto& allocator : mDedicatedResourceAllocatorOfType) { + allocator = nullptr; + } #if defined(GPGMM_ENABLE_DEVICE_LEAK_CHECKS) ReportLiveDeviceObjects(mDevice); @@ -1294,7 +1301,7 @@ namespace gpgmm::d3d12 { newResourceDesc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER && newResourceDesc.Flags == D3D12_RESOURCE_FLAG_NONE && isCreatedResourceStateRequired && !isSubAllocationDisabled) { - allocator = mSmallBufferAllocatorOfType[static_cast(resourceHeapType)].get(); + allocator = mSmallBufferAllocatorOfType[static_cast(resourceHeapType)].Get(); // GetResourceAllocationInfo() always rejects alignments smaller than 64KB. So if the // alignment was unspecified, assign the smallest alignment possible. 
@@ -1345,9 +1352,9 @@ namespace gpgmm::d3d12 { if (!isAlwaysCommitted && !isSubAllocationDisabled) { if (isMSAA) { allocator = - mMSAAResourceAllocatorOfType[static_cast(resourceHeapType)].get(); + mMSAAResourceAllocatorOfType[static_cast(resourceHeapType)].Get(); } else { - allocator = mResourceAllocatorOfType[static_cast(resourceHeapType)].get(); + allocator = mResourceAllocatorOfType[static_cast(resourceHeapType)].Get(); } GPGMM_RETURN_IF_NOT_FATAL( @@ -1389,10 +1396,10 @@ namespace gpgmm::d3d12 { if (isMSAA) { allocator = mMSAADedicatedResourceAllocatorOfType[static_cast(resourceHeapType)] - .get(); + .Get(); } else { allocator = - mDedicatedResourceAllocatorOfType[static_cast(resourceHeapType)].get(); + mDedicatedResourceAllocatorOfType[static_cast(resourceHeapType)].Get(); } MemoryAllocationRequest dedicatedRequest = request; diff --git a/src/gpgmm/d3d12/ResourceAllocatorD3D12.h b/src/gpgmm/d3d12/ResourceAllocatorD3D12.h index 55f61feb..b75f2a34 100644 --- a/src/gpgmm/d3d12/ResourceAllocatorD3D12.h +++ b/src/gpgmm/d3d12/ResourceAllocatorD3D12.h @@ -95,34 +95,34 @@ namespace gpgmm::d3d12 { const D3D12_CLEAR_VALUE* clearValue, ResourceAllocation** ppResourceAllocationOut); - std::unique_ptr CreateResourceAllocator( + ScopedRef CreateResourceAllocator( const RESOURCE_ALLOCATOR_DESC& descriptor, D3D12_HEAP_FLAGS heapFlags, const D3D12_HEAP_PROPERTIES& heapProperties, uint64_t heapAlignment); - std::unique_ptr CreateSmallBufferAllocator( + ScopedRef CreateSmallBufferAllocator( const RESOURCE_ALLOCATOR_DESC& descriptor, D3D12_HEAP_FLAGS heapFlags, const D3D12_HEAP_PROPERTIES& heapProperties, uint64_t heapAlignment, D3D12_RESOURCE_STATES initialResourceState); - std::unique_ptr CreatePoolAllocator( + ScopedRef CreatePoolAllocator( RESOURCE_ALLOCATION_ALGORITHM algorithm, uint64_t memorySize, uint64_t memoryAlignment, bool isAlwaysOnDemand, - std::unique_ptr underlyingAllocator); + ScopedRef underlyingAllocator); - std::unique_ptr CreateSubAllocator( + ScopedRef 
CreateSubAllocator( RESOURCE_ALLOCATION_ALGORITHM algorithm, uint64_t memorySize, uint64_t memoryAlignment, float memoryFragmentationLimit, float memoryGrowthFactor, bool isPrefetchAllowed, - std::unique_ptr underlyingAllocator); + ScopedRef underlyingAllocator); HRESULT CreatePlacedResource(ResidencyHeap* const resourceHeap, uint64_t resourceOffset, @@ -176,20 +176,20 @@ namespace gpgmm::d3d12 { static constexpr uint64_t kNumOfResourceHeapTypes = 12u; - std::array, kNumOfResourceHeapTypes> + std::array, kNumOfResourceHeapTypes> mDedicatedResourceAllocatorOfType; - std::array, kNumOfResourceHeapTypes> + std::array, kNumOfResourceHeapTypes> mResourceAllocatorOfType; - std::array, kNumOfResourceHeapTypes> + std::array, kNumOfResourceHeapTypes> mMSAADedicatedResourceAllocatorOfType; - std::array, kNumOfResourceHeapTypes> + std::array, kNumOfResourceHeapTypes> mMSAAResourceAllocatorOfType; - std::array, kNumOfResourceHeapTypes> + std::array, kNumOfResourceHeapTypes> mSmallBufferAllocatorOfType; - std::unique_ptr mTrackingAllocator; + ScopedRef mTrackingAllocator; }; } // namespace gpgmm::d3d12 diff --git a/src/gpgmm/utils/RefCount.h b/src/gpgmm/utils/RefCount.h index c60b61d2..edae3b4b 100644 --- a/src/gpgmm/utils/RefCount.h +++ b/src/gpgmm/utils/RefCount.h @@ -86,6 +86,11 @@ namespace gpgmm { return *this; } + ScopedRef& operator=(std::nullptr_t) { + SafeRelease(mPtr); + return *this; + } + T* Get() const { return mPtr; } @@ -122,6 +127,10 @@ namespace gpgmm { return !operator==(other); } + explicit operator bool() const { + return mPtr != nullptr; + } + private: static void SafeRelease(T*& ptr) { if (SafeUnref(ptr)) { diff --git a/src/gpgmm/vk/ResourceAllocatorVk.cpp b/src/gpgmm/vk/ResourceAllocatorVk.cpp index f8063cfa..d47b82bd 100644 --- a/src/gpgmm/vk/ResourceAllocatorVk.cpp +++ b/src/gpgmm/vk/ResourceAllocatorVk.cpp @@ -344,12 +344,12 @@ namespace gpgmm::vk { ResultOrError> result; if (!isSubAllocationDisabled) { - allocator = 
mResourceAllocatorsPerType[memoryTypeIndex].get(); + allocator = mResourceAllocatorsPerType[memoryTypeIndex].Get(); result = allocator->TryAllocateMemory(request); } if (!result.IsSuccess()) { - allocator = mDeviceAllocatorsPerType[memoryTypeIndex].get(); + allocator = mDeviceAllocatorsPerType[memoryTypeIndex].Get(); result = allocator->TryAllocateMemory(request); } @@ -384,23 +384,23 @@ namespace gpgmm::vk { return mCaps.get(); } - std::unique_ptr GpResourceAllocator_T::CreateDeviceMemoryAllocator( + ScopedRef GpResourceAllocator_T::CreateDeviceMemoryAllocator( const GpAllocatorCreateInfo& info, uint64_t memoryTypeIndex, uint64_t memoryAlignment) { - std::unique_ptr deviceMemoryAllocator = - std::make_unique(this, memoryTypeIndex); + ScopedRef deviceMemoryAllocator( + new DeviceMemoryAllocator(this, memoryTypeIndex)); if (!(info.flags & GP_ALLOCATOR_CREATE_ALWAYS_ON_DEMAND)) { switch (info.poolAlgorithm) { case GP_ALLOCATOR_ALGORITHM_FIXED_POOL: { - return std::make_unique( - info.preferredDeviceMemorySize, memoryAlignment, - std::move(deviceMemoryAllocator)); + return new PooledMemoryAllocator(info.preferredDeviceMemorySize, + memoryAlignment, + std::move(deviceMemoryAllocator)); } case GP_ALLOCATOR_ALGORITHM_SEGMENTED_POOL: { - return std::make_unique( - std::move(deviceMemoryAllocator), memoryAlignment); + return new SegmentedMemoryAllocator(std::move(deviceMemoryAllocator), + memoryAlignment); } default: { UNREACHABLE(); @@ -412,11 +412,11 @@ namespace gpgmm::vk { return deviceMemoryAllocator; } - std::unique_ptr GpResourceAllocator_T::CreateResourceSubAllocator( + ScopedRef GpResourceAllocator_T::CreateResourceSubAllocator( const GpAllocatorCreateInfo& info, uint64_t memoryTypeIndex, uint64_t memoryAlignment) { - std::unique_ptr pooledOrNonPooledAllocator = + ScopedRef pooledOrNonPooledAllocator = CreateDeviceMemoryAllocator(info, memoryTypeIndex, memoryAlignment); // TODO: Figure out how to specify this using Vulkan API. 
@@ -427,14 +427,14 @@ namespace gpgmm::vk { switch (info.subAllocationAlgorithm) { case GP_ALLOCATOR_ALGORITHM_BUDDY_SYSTEM: { - return std::make_unique( + return new BuddyMemoryAllocator( /*systemSize*/ kMaxDeviceMemorySize, /*memorySize*/ std::max(memoryAlignment, info.preferredDeviceMemorySize), /*memoryAlignment*/ memoryAlignment, /*memoryAllocator*/ std::move(pooledOrNonPooledAllocator)); } case GP_ALLOCATOR_ALGORITHM_SLAB: { - return std::make_unique( + return new SlabCacheAllocator( /*maxSlabSize*/ kMaxDeviceMemorySize, /*minSlabSize*/ std::max(memoryAlignment, info.preferredDeviceMemorySize), /*slabAlignment*/ memoryAlignment, diff --git a/src/gpgmm/vk/ResourceAllocatorVk.h b/src/gpgmm/vk/ResourceAllocatorVk.h index 4fbaae7a..e58ccf8c 100644 --- a/src/gpgmm/vk/ResourceAllocatorVk.h +++ b/src/gpgmm/vk/ResourceAllocatorVk.h @@ -57,15 +57,14 @@ namespace gpgmm::vk { const GpResourceAllocationCreateInfo& allocationInfo, uint32_t* memoryTypeIndexOut); - std::unique_ptr CreateDeviceMemoryAllocator( + ScopedRef CreateDeviceMemoryAllocator( const GpAllocatorCreateInfo& info, uint64_t memoryTypeIndex, uint64_t memoryAlignment); - std::unique_ptr CreateResourceSubAllocator( - const GpAllocatorCreateInfo& info, - uint64_t memoryTypeIndex, - uint64_t memoryAlignment); + ScopedRef CreateResourceSubAllocator(const GpAllocatorCreateInfo& info, + uint64_t memoryTypeIndex, + uint64_t memoryAlignment); // ObjectBase interface DEFINE_OBJECT_BASE_OVERRIDES(GpResourceAllocator_T) @@ -74,8 +73,8 @@ namespace gpgmm::vk { VulkanFunctions mVulkanFunctions; std::unique_ptr mCaps; - std::vector> mResourceAllocatorsPerType; - std::vector> mDeviceAllocatorsPerType; + std::vector> mResourceAllocatorsPerType; + std::vector> mDeviceAllocatorsPerType; std::vector mMemoryTypes; }; diff --git a/src/tests/DummyMemoryAllocator.h b/src/tests/DummyMemoryAllocator.h index 74f5cb44..70a14645 100644 --- a/src/tests/DummyMemoryAllocator.h +++ b/src/tests/DummyMemoryAllocator.h @@ -31,7 +31,7 @@ 
namespace gpgmm { public: DummyMemoryAllocator() = default; - explicit DummyMemoryAllocator(std::unique_ptr next) + explicit DummyMemoryAllocator(ScopedRef next) : MemoryAllocatorBase(std::move(next)) { } diff --git a/src/tests/perftests/MemoryAllocatorPerfTests.cpp b/src/tests/perftests/MemoryAllocatorPerfTests.cpp index 24ad1818..501c822b 100644 --- a/src/tests/perftests/MemoryAllocatorPerfTests.cpp +++ b/src/tests/perftests/MemoryAllocatorPerfTests.cpp @@ -81,7 +81,7 @@ class SingleSizeAllocationPerfTests : public MemoryAllocatorPerfTests { BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, SlabCache_Warm)(benchmark::State& state) { SlabCacheAllocator allocator(state.range(1), state.range(0), kMemoryAlignment, kMemoryAlignment, /*allowPrefetch*/ false, kDisableSlabGrowth, - std::make_unique()); + new DummyMemoryAllocator); // Below is effectively equivelent to STL's reserve(size=1). { @@ -99,7 +99,7 @@ BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, SlabCache_Warm)(benchmark::Sta BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, SlabCache_Cold)(benchmark::State& state) { SlabCacheAllocator allocator(state.range(1), state.range(0), kMemoryAlignment, /*slabFragmentationLimit*/ 1, /*allowPrefetch*/ false, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); for (auto _ : state) { SingleStep(state, &allocator, CreateBasicRequest(state.range(2))); @@ -107,11 +107,10 @@ BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, SlabCache_Cold)(benchmark::Sta } BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, Slab)(benchmark::State& state) { - std::unique_ptr memoryAllocator = - std::make_unique(); + ScopedRef memoryAllocator(new DummyMemoryAllocator); SlabMemoryAllocator allocator(state.range(2), state.range(1), state.range(0), kMemoryAlignment, /*slabFragmentationLimit*/ 1, /*allowPrefetch*/ false, - /*slabGrowthFactor*/ 1, memoryAllocator.get()); + /*slabGrowthFactor*/ 1, memoryAllocator.Get()); for (auto _ : state) { 
SingleStep(state, &allocator, CreateBasicRequest(state.range(2))); @@ -120,7 +119,7 @@ BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, Slab)(benchmark::State& state) BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, BuddySystem)(benchmark::State& state) { BuddyMemoryAllocator allocator(state.range(1), state.range(0), kMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); for (auto _ : state) { SingleStep(state, &allocator, CreateBasicRequest(state.range(2))); @@ -128,7 +127,7 @@ BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, BuddySystem)(benchmark::State& } BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, Standalone)(benchmark::State& state) { - DedicatedMemoryAllocator allocator(std::make_unique(), kMemoryAlignment); + DedicatedMemoryAllocator allocator(new DummyMemoryAllocator, kMemoryAlignment); for (auto _ : state) { SingleStep(state, &allocator, CreateBasicRequest(state.range(2))); @@ -136,7 +135,7 @@ BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, Standalone)(benchmark::State& } BENCHMARK_DEFINE_F(SingleSizeAllocationPerfTests, SegmentedPool)(benchmark::State& state) { - SegmentedMemoryAllocator allocator(std::make_unique(), kMemoryAlignment); + SegmentedMemoryAllocator allocator(new DummyMemoryAllocator, kMemoryAlignment); for (auto _ : state) { SingleStep(state, &allocator, CreateBasicRequest(state.range(2))); diff --git a/src/tests/unittests/BuddyMemoryAllocatorTests.cpp b/src/tests/unittests/BuddyMemoryAllocatorTests.cpp index fb5d06f9..6395996d 100644 --- a/src/tests/unittests/BuddyMemoryAllocatorTests.cpp +++ b/src/tests/unittests/BuddyMemoryAllocatorTests.cpp @@ -52,7 +52,7 @@ TEST_F(BuddyMemoryAllocatorTests, SingleHeap) { // constexpr uint64_t maxBlockSize = kDefaultMemorySize; BuddyMemoryAllocator allocator(maxBlockSize, kDefaultMemorySize, kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); // Cannot allocate greater than heap size. 
{ @@ -95,7 +95,7 @@ TEST_F(BuddyMemoryAllocatorTests, MultipleHeaps) { // constexpr uint64_t maxBlockSize = 256; BuddyMemoryAllocator allocator(maxBlockSize, kDefaultMemorySize, kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); // Cannot allocate greater than heap size. { @@ -157,7 +157,7 @@ TEST_F(BuddyMemoryAllocatorTests, MultipleSplitHeaps) { // constexpr uint64_t maxBlockSize = 256; BuddyMemoryAllocator allocator(maxBlockSize, kDefaultMemorySize, kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); // Allocate two 64 byte sub-allocations. std::unique_ptr allocation1 = @@ -220,7 +220,7 @@ TEST_F(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) { // constexpr uint64_t maxBlockSize = 512; BuddyMemoryAllocator allocator(maxBlockSize, kDefaultMemorySize, kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); // Allocate two 64-byte allocations. std::unique_ptr allocation1 = @@ -311,7 +311,7 @@ TEST_F(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) { // constexpr uint64_t maxBlockSize = 512; BuddyMemoryAllocator allocator(maxBlockSize, kDefaultMemorySize, kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); std::unique_ptr allocation1 = allocator.TryAllocateMemoryForTesting(CreateBasicRequest(64, 128)); @@ -385,7 +385,7 @@ TEST_F(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) { // constexpr uint64_t maxBlockSize = 512; BuddyMemoryAllocator allocator(maxBlockSize, kDefaultMemorySize, kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); constexpr uint64_t alignment = 64; @@ -448,7 +448,7 @@ TEST_F(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) { TEST_F(BuddyMemoryAllocatorTests, AllocationOverflow) { constexpr uint64_t maxBlockSize = 512; BuddyMemoryAllocator allocator(maxBlockSize, kDefaultMemorySize, kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); constexpr uint64_t largeBlock = (1ull << 63) 
+ 1; std::unique_ptr invalidAllocation = allocator.TryAllocateMemoryForTesting( @@ -460,8 +460,8 @@ TEST_F(BuddyMemoryAllocatorTests, AllocationOverflow) { TEST_F(BuddyMemoryAllocatorTests, ReuseFreedHeaps) { constexpr uint64_t kMaxBlockSize = 4096; - std::unique_ptr poolAllocator = std::make_unique( - kDefaultMemorySize, kDefaultMemoryAlignment, std::make_unique()); + ScopedRef poolAllocator(new PooledMemoryAllocator( + kDefaultMemorySize, kDefaultMemoryAlignment, new DummyMemoryAllocator)); BuddyMemoryAllocator allocator(kMaxBlockSize, kDefaultMemorySize, kDefaultMemoryAlignment, std::move(poolAllocator)); @@ -521,8 +521,8 @@ TEST_F(BuddyMemoryAllocatorTests, ReuseFreedHeaps) { TEST_F(BuddyMemoryAllocatorTests, DestroyHeaps) { constexpr uint64_t kMaxBlockSize = 4096; - std::unique_ptr poolAllocator = std::make_unique( - kDefaultMemorySize, kDefaultMemoryAlignment, std::make_unique()); + ScopedRef poolAllocator(new PooledMemoryAllocator( + kDefaultMemorySize, kDefaultMemoryAlignment, new DummyMemoryAllocator)); BuddyMemoryAllocator allocator(kMaxBlockSize, kDefaultMemorySize, kDefaultMemoryAlignment, std::move(poolAllocator)); diff --git a/src/tests/unittests/MemoryAllocatorTests.cpp b/src/tests/unittests/MemoryAllocatorTests.cpp index 61d96c9e..b488d2ac 100644 --- a/src/tests/unittests/MemoryAllocatorTests.cpp +++ b/src/tests/unittests/MemoryAllocatorTests.cpp @@ -30,7 +30,7 @@ class TestMemoryAllocator final : public DummyMemoryAllocator { public: TestMemoryAllocator() = default; - explicit TestMemoryAllocator(std::unique_ptr next) + explicit TestMemoryAllocator(ScopedRef next) : DummyMemoryAllocator(std::move(next)) { } @@ -53,28 +53,28 @@ class MemoryAllocatorTests : public testing::Test { }; TEST_F(MemoryAllocatorTests, SingleAllocator) { - auto child = std::make_unique(); - auto parent = std::make_unique(std::move(child)); + ScopedRef child(new TestMemoryAllocator); + ScopedRef parent(new TestMemoryAllocator(std::move(child))); 
EXPECT_TRUE(parent->GetNextInChain() != nullptr); parent->ReleaseMemory(kReleaseAllMemory); EXPECT_EQ(ReleaseMemoryCount, 2u); - parent.reset(); + parent = nullptr; EXPECT_EQ(DestructCount, 2u); } TEST_F(MemoryAllocatorTests, MultipleAllocators) { - auto grandChild = std::make_unique(); - auto child = std::make_unique(std::move(grandChild)); - auto parent = std::make_unique(std::move(child)); + ScopedRef grandChild(new TestMemoryAllocator); + ScopedRef child(new TestMemoryAllocator(std::move(grandChild))); + ScopedRef parent(new TestMemoryAllocator(std::move(child))); EXPECT_TRUE(parent->GetNextInChain() != nullptr); parent->ReleaseMemory(kReleaseAllMemory); EXPECT_EQ(ReleaseMemoryCount, 3u); - parent.reset(); + parent = nullptr; EXPECT_EQ(DestructCount, 3u); } diff --git a/src/tests/unittests/PooledMemoryAllocatorTests.cpp b/src/tests/unittests/PooledMemoryAllocatorTests.cpp index 1c4732e4..b3524f41 100644 --- a/src/tests/unittests/PooledMemoryAllocatorTests.cpp +++ b/src/tests/unittests/PooledMemoryAllocatorTests.cpp @@ -40,7 +40,7 @@ class PooledMemoryAllocatorTests : public testing::Test { TEST_F(PooledMemoryAllocatorTests, SingleHeap) { PooledMemoryAllocator allocator(kDefaultMemorySize, kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting( CreateBasicRequest(kDefaultMemorySize, kDefaultMemoryAlignment)); @@ -58,7 +58,7 @@ TEST_F(PooledMemoryAllocatorTests, SingleHeap) { TEST_F(PooledMemoryAllocatorTests, MultipleHeaps) { PooledMemoryAllocator allocator(kDefaultMemorySize, kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); std::unique_ptr firstAllocation = allocator.TryAllocateMemoryForTesting( CreateBasicRequest(kDefaultMemorySize, kDefaultMemoryAlignment)); @@ -85,7 +85,7 @@ TEST_F(PooledMemoryAllocatorTests, MultipleHeaps) { TEST_F(PooledMemoryAllocatorTests, ReuseFreedHeaps) { PooledMemoryAllocator allocator(kDefaultMemorySize, 
kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); { std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting( CreateBasicRequest(kDefaultMemorySize, kDefaultMemoryAlignment)); @@ -111,7 +111,7 @@ TEST_F(PooledMemoryAllocatorTests, ReuseFreedHeaps) { TEST_F(PooledMemoryAllocatorTests, GetInfo) { PooledMemoryAllocator allocator(kDefaultMemorySize, kDefaultMemoryAlignment, - std::make_unique()); + new DummyMemoryAllocator); std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting( CreateBasicRequest(kDefaultMemorySize, kDefaultMemoryAlignment)); diff --git a/src/tests/unittests/SegmentedMemoryAllocatorTests.cpp b/src/tests/unittests/SegmentedMemoryAllocatorTests.cpp index 5ed20695..84b793a0 100644 --- a/src/tests/unittests/SegmentedMemoryAllocatorTests.cpp +++ b/src/tests/unittests/SegmentedMemoryAllocatorTests.cpp @@ -33,8 +33,7 @@ MemoryAllocationRequest CreateBasicRequest(uint64_t size, uint64_t alignment) { } TEST(SegmentedMemoryAllocatorTests, SingleHeap) { - SegmentedMemoryAllocator allocator(std::make_unique(), - kDefaultMemoryAlignment); + SegmentedMemoryAllocator allocator(new DummyMemoryAllocator, kDefaultMemoryAlignment); std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting( CreateBasicRequest(kDefaultMemorySize, kDefaultMemoryAlignment)); @@ -51,8 +50,7 @@ TEST(SegmentedMemoryAllocatorTests, SingleHeap) { } TEST(SegmentedMemoryAllocatorTests, MultipleHeaps) { - SegmentedMemoryAllocator allocator(std::make_unique(), - kDefaultMemoryAlignment); + SegmentedMemoryAllocator allocator(new DummyMemoryAllocator, kDefaultMemoryAlignment); std::unique_ptr firstAllocation = allocator.TryAllocateMemoryForTesting( CreateBasicRequest(kDefaultMemorySize, kDefaultMemoryAlignment)); @@ -78,8 +76,7 @@ TEST(SegmentedMemoryAllocatorTests, MultipleHeaps) { } TEST(SegmentedMemoryAllocatorTests, MultipleHeapsVariousSizes) { - SegmentedMemoryAllocator allocator(std::make_unique(), - kDefaultMemoryAlignment); + 
SegmentedMemoryAllocator allocator(new DummyMemoryAllocator, kDefaultMemoryAlignment); // Append the 1st and 3rd segment, in sequence. uint64_t firstMemorySize = kDefaultMemorySize / 2; @@ -161,8 +158,7 @@ TEST(SegmentedMemoryAllocatorTests, MultipleHeapsVariousSizes) { } TEST(SegmentedMemoryAllocatorTests, ReuseFreedHeaps) { - SegmentedMemoryAllocator allocator(std::make_unique(), - kDefaultMemoryAlignment); + SegmentedMemoryAllocator allocator(new DummyMemoryAllocator, kDefaultMemoryAlignment); { std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting( CreateBasicRequest(kDefaultMemorySize, kDefaultMemoryAlignment)); @@ -187,8 +183,7 @@ TEST(SegmentedMemoryAllocatorTests, ReuseFreedHeaps) { } TEST(SegmentedMemoryAllocatorTests, GetInfo) { - SegmentedMemoryAllocator allocator(std::make_unique(), - kDefaultMemoryAlignment); + SegmentedMemoryAllocator allocator(new DummyMemoryAllocator, kDefaultMemoryAlignment); std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting( CreateBasicRequest(kDefaultMemorySize, kDefaultMemoryAlignment)); diff --git a/src/tests/unittests/SlabMemoryAllocatorTests.cpp b/src/tests/unittests/SlabMemoryAllocatorTests.cpp index e22ca63d..1499970b 100644 --- a/src/tests/unittests/SlabMemoryAllocatorTests.cpp +++ b/src/tests/unittests/SlabMemoryAllocatorTests.cpp @@ -51,8 +51,7 @@ class SlabMemoryAllocatorTests : public testing::Test { // Verify allocation in a single slab. TEST_F(SlabMemoryAllocatorTests, SingleSlab) { - std::unique_ptr dummyMemoryAllocator = - std::make_unique(); + ScopedRef dummyMemoryAllocator(new DummyMemoryAllocator); // Verify allocation greater then the block size fails. 
{ @@ -61,7 +60,7 @@ TEST_F(SlabMemoryAllocatorTests, SingleSlab) { SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, kDisableSlabGrowth, - dummyMemoryAllocator.get()); + dummyMemoryAllocator.Get()); ASSERT_EQ(allocator.TryAllocateMemoryForTesting(CreateBasicRequest(kBlockSize * 2, 1)), nullptr); @@ -74,7 +73,7 @@ TEST_F(SlabMemoryAllocatorTests, SingleSlab) { SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, kDisableSlabGrowth, - dummyMemoryAllocator.get()); + dummyMemoryAllocator.Get()); ASSERT_EQ(allocator.TryAllocateMemoryForTesting(CreateBasicRequest(kMaxSlabSize, 1)), nullptr); @@ -89,7 +88,7 @@ TEST_F(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kMaxSlabSize = kBlockSize; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, dummyMemoryAllocator.get()); + kDisableSlabGrowth, dummyMemoryAllocator.Get()); std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting(CreateBasicRequest(kBlockSize, 1)); @@ -108,7 +107,7 @@ TEST_F(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kSlabSize = 0; // deduce slab size from allocation size. SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, dummyMemoryAllocator.get()); + kDisableSlabGrowth, dummyMemoryAllocator.Get()); // Max allocation cannot be more than 1/8th the max slab size or 4 bytes. // Since a 10 byte allocation requires a 128 byte slab, allocation should always fail. 
@@ -133,7 +132,7 @@ TEST_F(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kMaxSlabSize = 128; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, dummyMemoryAllocator.get()); + kDisableSlabGrowth, dummyMemoryAllocator.Get()); std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting(CreateBasicRequest(kBlockSize, 1)); @@ -151,7 +150,7 @@ TEST_F(SlabMemoryAllocatorTests, SingleSlab) { constexpr uint64_t kMaxSlabSize = 128; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, dummyMemoryAllocator.get()); + kDisableSlabGrowth, dummyMemoryAllocator.Get()); std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting(CreateBasicRequest(kBlockSize, 1)); @@ -169,7 +168,7 @@ TEST_F(SlabMemoryAllocatorTests, SingleSlab) { SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, kDisableSlabGrowth, - dummyMemoryAllocator.get()); + dummyMemoryAllocator.Get()); EXPECT_EQ(allocator.TryAllocateMemoryForTesting(CreateBasicRequest(kBlockSize, 1, true)), nullptr); @@ -184,8 +183,7 @@ TEST_F(SlabMemoryAllocatorTests, SingleSlab) { // Verify allocation in multiple slabs. TEST_F(SlabMemoryAllocatorTests, MultipleSlabs) { - std::unique_ptr dummyMemoryAllocator = - std::make_unique(); + ScopedRef dummyMemoryAllocator(new DummyMemoryAllocator); // Fill up exactly N slabs (allocation = block = slab size). 
{ @@ -195,7 +193,7 @@ TEST_F(SlabMemoryAllocatorTests, MultipleSlabs) { SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, /*slabSize*/ kBlockSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, kDisableSlabGrowth, - dummyMemoryAllocator.get()); + dummyMemoryAllocator.Get()); const uint64_t kNumOfSlabs = 12; std::vector> allocations = {}; for (uint32_t slabi = 0; slabi < kNumOfSlabs; slabi++) { @@ -222,7 +220,7 @@ TEST_F(SlabMemoryAllocatorTests, MultipleSlabs) { SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, kDisableSlabGrowth, - dummyMemoryAllocator.get()); + dummyMemoryAllocator.Get()); // Fill up exactly two 128B slabs. std::vector> allocations = {}; for (uint32_t blocki = 0; blocki < (kDefaultSlabSize * 2 / kBlockSize); blocki++) { @@ -249,7 +247,7 @@ TEST_F(SlabMemoryAllocatorTests, MultipleSlabs) { SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, kDisableSlabGrowth, - dummyMemoryAllocator.get()); + dummyMemoryAllocator.Get()); // Both allocation A and B go in Slab A, which will become full. std::unique_ptr allocationAinSlabA = @@ -329,14 +327,13 @@ TEST_F(SlabMemoryAllocatorTests, MultipleSlabs) { // Verify a very large allocation does not overflow. 
TEST_F(SlabMemoryAllocatorTests, AllocationOverflow) { - std::unique_ptr dummyMemoryAllocator = - std::make_unique(); + ScopedRef dummyMemoryAllocator(new DummyMemoryAllocator); constexpr uint64_t kBlockSize = 32; constexpr uint64_t kMaxSlabSize = 512; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, dummyMemoryAllocator.get()); + kDisableSlabGrowth, dummyMemoryAllocator.Get()); constexpr uint64_t largeBlock = (1ull << 63) + 1; std::unique_ptr invalidAllocation = allocator.TryAllocateMemoryForTesting( @@ -347,7 +344,7 @@ TEST_F(SlabMemoryAllocatorTests, AllocationOverflow) { // Verify slab will be reused from a pool. TEST_F(SlabMemoryAllocatorTests, ReuseSlabs) { std::unique_ptr poolAllocator = std::make_unique( - kDefaultSlabSize, kDefaultSlabAlignment, std::make_unique()); + kDefaultSlabSize, kDefaultSlabAlignment, new DummyMemoryAllocator); constexpr uint64_t kBlockSize = 32; constexpr uint64_t kMaxSlabSize = 512; @@ -389,15 +386,14 @@ TEST_F(SlabMemoryAllocatorTests, ReuseSlabs) { TEST_F(SlabMemoryAllocatorTests, GetInfo) { // Test slab allocator. 
{ - std::unique_ptr dummyMemoryAllocator = - std::make_unique(); + ScopedRef dummyMemoryAllocator(new DummyMemoryAllocator); constexpr uint64_t kBlockSize = 32; constexpr uint64_t kMaxSlabSize = 512; SlabMemoryAllocator allocator(kBlockSize, kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, kDisableSlabGrowth, - dummyMemoryAllocator.get()); + dummyMemoryAllocator.Get()); std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting(CreateBasicRequest(kBlockSize, 1)); @@ -424,7 +420,7 @@ TEST_F(SlabMemoryAllocatorTests, GetInfo) { { std::unique_ptr poolAllocator = std::make_unique(kDefaultSlabSize, kDefaultSlabAlignment, - std::make_unique()); + new DummyMemoryAllocator); constexpr uint64_t kBlockSize = 32; constexpr uint64_t kMaxSlabSize = 512; @@ -696,7 +692,7 @@ TEST_F(SlabCacheAllocatorTests, SlabOversized) { constexpr uint64_t kMinSlabSize = 16; SlabCacheAllocator allocator(kMaxSlabSize, kMinSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); EXPECT_EQ(allocator.TryAllocateMemoryForTesting(CreateBasicRequest(kMaxSlabSize + 1, 1)), nullptr); @@ -707,7 +703,7 @@ TEST_F(SlabCacheAllocatorTests, SingleSlabMultipleSize) { constexpr uint64_t kSlabSize = 0; // deduce slab size from allocation size. SlabCacheAllocator allocator(kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); // Verify requesting an allocation without memory will not return a valid allocation. { @@ -725,7 +721,7 @@ TEST_F(SlabCacheAllocatorTests, SingleSlabMultipleAlignments) { constexpr uint64_t kSlabSize = 0; // deduce slab size from allocation size. 
SlabCacheAllocator allocator(kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); // Verify requesting an allocation of same size using multiple alignment succeeds. { @@ -750,7 +746,7 @@ TEST_F(SlabCacheAllocatorTests, MultipleSlabsSameSize) { constexpr uint64_t kSlabSize = 0; // deduce slab size from allocation size. SlabCacheAllocator allocator(kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); std::unique_ptr firstAllocation = allocator.TryAllocateMemoryForTesting(CreateBasicRequest(22, 1)); @@ -780,7 +776,7 @@ TEST_F(SlabCacheAllocatorTests, MultipleSlabsVariableSizes) { constexpr uint64_t kSlabSize = 0; // deduce slab size from allocation size. SlabCacheAllocator allocator(kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); { constexpr uint64_t allocationSize = 22; std::unique_ptr allocation = @@ -824,12 +820,11 @@ TEST_F(SlabCacheAllocatorTests, SingleSlabInBuddy) { constexpr uint64_t kMaxBlockSize = 256; constexpr uint64_t kMaxSlabSize = kMaxBlockSize; constexpr uint64_t kSlabSize = kDefaultSlabSize / 8; - SlabCacheAllocator allocator(kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, - kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, - std::make_unique( - kMaxBlockSize, kDefaultSlabSize, kDefaultSlabAlignment, - std::make_unique())); + SlabCacheAllocator allocator( + kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, + kNoSlabPrefetchAllowed, kDisableSlabGrowth, + new BuddyMemoryAllocator(kMaxBlockSize, kDefaultSlabSize, kDefaultSlabAlignment, + new DummyMemoryAllocator)); 
constexpr uint64_t kBlockSize = 4; std::unique_ptr allocation = @@ -848,12 +843,11 @@ TEST_F(SlabCacheAllocatorTests, MultipleSlabsInBuddy) { constexpr uint64_t kMaxBlockSize = 256; constexpr uint64_t kMaxSlabSize = kMaxBlockSize; constexpr uint64_t kSlabSize = kDefaultSlabSize / 8; - SlabCacheAllocator allocator(kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, - kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, - std::make_unique( - kMaxBlockSize, kDefaultSlabSize, kDefaultSlabAlignment, - std::make_unique())); + SlabCacheAllocator allocator( + kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, + kNoSlabPrefetchAllowed, kDisableSlabGrowth, + new BuddyMemoryAllocator(kMaxBlockSize, kDefaultSlabSize, kDefaultSlabAlignment, + new DummyMemoryAllocator)); // Verify multiple slab-buddy sub-allocation in the same slab are allocated contigiously. { @@ -927,7 +921,7 @@ TEST_F(SlabCacheAllocatorTests, GetInfo) { constexpr uint64_t kMaxSlabSize = 512; SlabCacheAllocator allocator(kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting(CreateBasicRequest(kBlockSize, 1)); @@ -957,8 +951,8 @@ TEST_F(SlabCacheAllocatorTests, GetInfo) { SlabCacheAllocator allocator( kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, kDisableSlabGrowth, - std::make_unique(kDefaultSlabSize, kDefaultSlabAlignment, - std::make_unique())); + new PooledMemoryAllocator(kDefaultSlabSize, kDefaultSlabAlignment, + new DummyMemoryAllocator)); std::unique_ptr allocation = allocator.TryAllocateMemoryForTesting(CreateBasicRequest(kBlockSize, 1)); @@ -986,12 +980,11 @@ TEST_F(SlabCacheAllocatorTests, GetInfo) { constexpr uint64_t kMaxBlockSize = 256; constexpr uint64_t kMaxSlabSize = 
kMaxBlockSize; constexpr uint64_t kSlabSize = kDefaultSlabSize / 8; - SlabCacheAllocator allocator(kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, - kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, - std::make_unique( - kMaxBlockSize, kDefaultSlabSize, kDefaultSlabAlignment, - std::make_unique())); + SlabCacheAllocator allocator( + kMaxSlabSize, kSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, + kNoSlabPrefetchAllowed, kDisableSlabGrowth, + new BuddyMemoryAllocator(kMaxBlockSize, kDefaultSlabSize, kDefaultSlabAlignment, + new DummyMemoryAllocator)); constexpr uint64_t kBlockSize = 4; std::unique_ptr allocation = @@ -1023,7 +1016,7 @@ TEST_F(SlabCacheAllocatorTests, SlabPrefetch) { SlabCacheAllocator allocator(kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kAllowSlabPrefetching, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); constexpr uint64_t kNumOfSlabs = 10u; std::vector> allocations = {}; @@ -1046,7 +1039,7 @@ TEST_F(SlabCacheAllocatorTests, SlabPrefetchDisabled) { SlabCacheAllocator allocator(kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, !kAllowSlabPrefetching, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); MemoryAllocationRequest alwaysPrefetchRequest = CreateBasicRequest(kBlockSize, 1); alwaysPrefetchRequest.AlwaysPrefetch = true; @@ -1069,7 +1062,7 @@ TEST_F(SlabCacheAllocatorTests, AlwaysCache) { constexpr uint64_t kMaxSlabSize = 512; SlabCacheAllocator allocator(kMaxSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); // Re-requesting same size from cached allocation should always succeed. 
MemoryAllocationRequest request = CreateBasicRequest(32, 1); @@ -1088,7 +1081,7 @@ TEST_F(SlabCacheAllocatorTests, AlwaysCache) { TEST_F(SlabCacheAllocatorTests, OutOfMemory) { SlabCacheAllocator allocator(kDefaultSlabSize, kDefaultSlabSize, kDefaultSlabAlignment, kDefaultSlabFragmentationLimit, kNoSlabPrefetchAllowed, - kDisableSlabGrowth, std::make_unique()); + kDisableSlabGrowth, new DummyMemoryAllocator); constexpr uint64_t kTotalMemoryAvailable = 512;