diff --git a/compiler-rt/lib/scudo/standalone/CMakeLists.txt b/compiler-rt/lib/scudo/standalone/CMakeLists.txt index 60092005cc33b..6fb4e88de3155 100644 --- a/compiler-rt/lib/scudo/standalone/CMakeLists.txt +++ b/compiler-rt/lib/scudo/standalone/CMakeLists.txt @@ -58,6 +58,7 @@ endif() set(SCUDO_HEADERS allocator_common.h allocator_config.h + allocator_config_wrapper.h atomic_helpers.h bytemap.h checksum.h diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def new file mode 100644 index 0000000000000..92f4e39872d4c --- /dev/null +++ b/compiler-rt/lib/scudo/standalone/allocator_config.def @@ -0,0 +1,124 @@ +//===-- allocator_config.def ------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines all the flags and types supported in Scudo. For optional +// flags and types, only explicitly define them when interested (i.e., unused +// optional flags or types can be skipped). + +#ifndef BASE_REQUIRED_TEMPLATE_TYPE +#define BASE_REQUIRED_TEMPLATE_TYPE(...) +#endif +#ifndef BASE_OPTIONAL +#define BASE_OPTIONAL(...) +#endif +#ifndef PRIMARY_REQUIRED_TYPE +#define PRIMARY_REQUIRED_TYPE(...) +#endif +#ifndef PRIMARY_REQUIRED +#define PRIMARY_REQUIRED(...) +#endif +#ifndef PRIMARY_OPTIONAL +#define PRIMARY_OPTIONAL(...) +#endif +#ifndef PRIMARY_OPTIONAL_TYPE +#define PRIMARY_OPTIONAL_TYPE(...) +#endif +#ifndef SECONDARY_REQUIRED_TEMPLATE_TYPE +#define SECONDARY_REQUIRED_TEMPLATE_TYPE(...) +#endif +#ifndef SECONDARY_CACHE_OPTIONAL +#define SECONDARY_CACHE_OPTIONAL(...) +#endif + +// BASE_REQUIRED_TEMPLATE_TYPE(NAME) +// +// Thread-Specific Data Registry used, shared or exclusive. 
+BASE_REQUIRED_TEMPLATE_TYPE(TSDRegistryT) + +// Defines the type of Primary allocator to use. +BASE_REQUIRED_TEMPLATE_TYPE(PrimaryT) + +// Defines the type of Secondary allocator to use. +BASE_REQUIRED_TEMPLATE_TYPE(SecondaryT) + +// BASE_OPTIONAL(TYPE, NAME, DEFAULT) +// +// Indicates possible support for Memory Tagging. +BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false) + +// PRIMARY_REQUIRED_TYPE(NAME) +// +// SizeClassMap to use with the Primary. +PRIMARY_REQUIRED_TYPE(SizeClassMap) + +// Defines the type and scale of a compact pointer. A compact pointer can +// be understood as the offset of a pointer within the region it belongs +// to, in increments of a power-of-2 scale. See `CompactPtrScale` also. +PRIMARY_REQUIRED_TYPE(CompactPtrT) + +// PRIMARY_REQUIRED(TYPE, NAME) +// +// The scale of a compact pointer. E.g., Ptr = Base + (CompactPtr << Scale). +PRIMARY_REQUIRED(const uptr, CompactPtrScale) + +// Log2 of the size of a size class region, as used by the Primary. +PRIMARY_REQUIRED(const uptr, RegionSizeLog) + +// Conceptually, a region will be divided into groups based on the address +// range. Each allocation consumes blocks in the same group until exhaustion +// then it pops out blocks in a new group. Therefore, `GroupSizeLog` is always +// smaller or equal to `RegionSizeLog`. Note that `GroupSizeLog` needs to be +// equal to `RegionSizeLog` for SizeClassAllocator32 because of certain +// constraints. +PRIMARY_REQUIRED(const uptr, GroupSizeLog) + +// Call map for user memory with at least this size. Only used with primary64. +PRIMARY_REQUIRED(const uptr, MapSizeIncrement) + +// Defines the minimal & maximal release interval that can be set. +PRIMARY_REQUIRED(const s32, MinReleaseToOsIntervalMs) +PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs) + +// PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT) +// +// Indicates support for offsetting the start of a region by a random number of +// pages. Only used with primary64. 
+PRIMARY_OPTIONAL(const bool, EnableRandomOffset, false) + +// PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT) +// +// Use condition variable to shorten the waiting time of refillment of +// freelist. Note that this depends on the implementation of condition +// variable on each platform and the performance may vary so that it does not +// guarantee a performance benefit. +PRIMARY_OPTIONAL_TYPE(ConditionVariableT, ConditionVariableDummy) + +// SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME) +// +// Defines the type of Secondary Cache to use. +SECONDARY_REQUIRED_TEMPLATE_TYPE(CacheT) + +// SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT) +// +// Defines the type of cache used by the Secondary. Some additional +// configuration entries can be necessary depending on the Cache. +SECONDARY_CACHE_OPTIONAL(const u32, EntriesArraySize, 0) +SECONDARY_CACHE_OPTIONAL(const u32, QuarantineSize, 0) +SECONDARY_CACHE_OPTIONAL(const u32, DefaultMaxEntriesCount, 0) +SECONDARY_CACHE_OPTIONAL(const u32, DefaultMaxEntrySize, 0) +SECONDARY_CACHE_OPTIONAL(const s32, MinReleaseToOsIntervalMs, INT32_MIN) +SECONDARY_CACHE_OPTIONAL(const s32, MaxReleaseToOsIntervalMs, INT32_MAX) + +#undef SECONDARY_CACHE_OPTIONAL +#undef SECONDARY_REQUIRED_TEMPLATE_TYPE +#undef PRIMARY_OPTIONAL_TYPE +#undef PRIMARY_OPTIONAL +#undef PRIMARY_REQUIRED +#undef PRIMARY_REQUIRED_TYPE +#undef BASE_OPTIONAL +#undef BASE_REQUIRED_TEMPLATE_TYPE diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h index 3c6aa3acb0e45..1e0cf1015ba67 100644 --- a/compiler-rt/lib/scudo/standalone/allocator_config.h +++ b/compiler-rt/lib/scudo/standalone/allocator_config.h @@ -38,80 +38,10 @@ namespace scudo { -// The combined allocator uses a structure as a template argument that -// specifies the configuration options for the various subcomponents of the -// allocator. -// -// struct ExampleConfig { -// // Indicates possible support for Memory Tagging. 
-// static const bool MaySupportMemoryTagging = false; -// -// // Thread-Specific Data Registry used, shared or exclusive. -// template using TSDRegistryT = TSDRegistrySharedT; -// -// struct Primary { -// // SizeClassMap to use with the Primary. -// using SizeClassMap = DefaultSizeClassMap; -// -// // Log2 of the size of a size class region, as used by the Primary. -// static const uptr RegionSizeLog = 30U; -// -// // Log2 of the size of block group, as used by the Primary. Each group -// // contains a range of memory addresses, blocks in the range will belong -// // to the same group. In general, single region may have 1 or 2MB group -// // size. Multiple regions will have the group size equal to the region -// // size because the region size is usually smaller than 1 MB. -// // Smaller value gives fine-grained control of memory usage but the -// // trade-off is that it may take longer time of deallocation. -// static const uptr GroupSizeLog = 20U; -// -// // Defines the type and scale of a compact pointer. A compact pointer can -// // be understood as the offset of a pointer within the region it belongs -// // to, in increments of a power-of-2 scale. -// // eg: Ptr = Base + (CompactPtr << Scale). -// typedef u32 CompactPtrT; -// static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG; -// -// // Indicates support for offsetting the start of a region by -// // a random number of pages. Only used with primary64. -// static const bool EnableRandomOffset = true; -// -// // Call map for user memory with at least this size. Only used with -// // primary64. -// static const uptr MapSizeIncrement = 1UL << 18; -// -// // Defines the minimal & maximal release interval that can be set. -// static const s32 MinReleaseToOsIntervalMs = INT32_MIN; -// static const s32 MaxReleaseToOsIntervalMs = INT32_MAX; -// -// // Use condition variable to shorten the waiting time of refillment of -// // freelist. 
Note that this depends on the implementation of condition -// // variable on each platform and the performance may vary so that it -// // doesn't guarantee a performance benefit. -// // Note that both variables have to be defined to enable it. -// static const bool UseConditionVariable = true; -// using ConditionVariableT = ConditionVariableLinux; -// }; -// // Defines the type of Primary allocator to use. -// template using PrimaryT = SizeClassAllocator64; -// -// // Defines the type of cache used by the Secondary. Some additional -// // configuration entries can be necessary depending on the Cache. -// struct Secondary { -// struct Cache { -// static const u32 EntriesArraySize = 32U; -// static const u32 QuarantineSize = 0U; -// static const u32 DefaultMaxEntriesCount = 32U; -// static const uptr DefaultMaxEntrySize = 1UL << 19; -// static const s32 MinReleaseToOsIntervalMs = INT32_MIN; -// static const s32 MaxReleaseToOsIntervalMs = INT32_MAX; -// }; -// // Defines the type of Secondary Cache to use. -// template using CacheT = MapAllocatorCache; -// }; -// // Defines the type of Secondary allocator to use. -// template using SecondaryT = MapAllocator; -// }; +// Scudo uses a structure as a template argument that specifies the +// configuration options for the various subcomponents of the allocator. See the +// following configs as examples and check `allocator_config.def` for all the +// available options. #ifndef SCUDO_USE_CUSTOM_CONFIG diff --git a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h new file mode 100644 index 0000000000000..a51d770b46646 --- /dev/null +++ b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h @@ -0,0 +1,135 @@ +//===-- allocator_config_wrapper.h ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_ALLOCATOR_CONFIG_WRAPPER_H_ +#define SCUDO_ALLOCATOR_CONFIG_WRAPPER_H_ + +#include "condition_variable.h" +#include "internal_defs.h" +#include "secondary.h" + +namespace { + +template struct removeConst { + using type = T; +}; +template struct removeConst { + using type = T; +}; + +// This is only used for SFINAE when detecting if a type is defined. +template struct voidAdaptor { + using type = void; +}; + +} // namespace + +namespace scudo { + +#define OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, MEMBER) \ + template struct NAME##State { \ + static constexpr removeConst::type getValue() { return DEFAULT; } \ + }; \ + template \ + struct NAME##State { \ + static constexpr removeConst::type getValue() { \ + return Config::MEMBER; \ + } \ + }; + +#define OPTIONAL_TYPE_TEMPLATE(NAME, DEFAULT, MEMBER) \ + template struct NAME##Type { \ + static constexpr bool enabled() { return false; } \ + using NAME = DEFAULT; \ + }; \ + template \ + struct NAME##Type::type> { \ + static constexpr bool enabled() { return true; } \ + using NAME = typename Config::MEMBER; \ + }; + +template struct BaseConfig { +#define BASE_REQUIRED_TEMPLATE_TYPE(NAME) \ + template using NAME = typename AllocatorConfig::template NAME; + +#define BASE_OPTIONAL(TYPE, NAME, DEFAULT) \ + OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, NAME) \ + static constexpr removeConst::type get##NAME() { \ + return NAME##State::getValue(); \ + } + +#include "allocator_config.def" +}; // BaseConfig + +template struct PrimaryConfig { + // TODO: Pass this flag through template argument to remove this hard-coded + // function. 
+ static constexpr bool getMaySupportMemoryTagging() { + return BaseConfig::getMaySupportMemoryTagging(); + } + +#define PRIMARY_REQUIRED_TYPE(NAME) \ + using NAME = typename AllocatorConfig::Primary::NAME; + +#define PRIMARY_REQUIRED(TYPE, NAME) \ + static constexpr removeConst::type get##NAME() { \ + return AllocatorConfig::Primary::NAME; \ + } + +#define PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT) \ + OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, NAME) \ + static constexpr removeConst::type get##NAME() { \ + return NAME##State::getValue(); \ + } + +#define PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT) \ + OPTIONAL_TYPE_TEMPLATE(NAME, DEFAULT, NAME) \ + static constexpr bool has##NAME() { \ + return NAME##Type::enabled(); \ + } \ + using NAME = typename NAME##Type::NAME; + +#include "allocator_config.def" + +}; // PrimaryConfig + +template struct SecondaryConfig { + // TODO: Pass this flag through template argument to remove this hard-coded + // function. + static constexpr bool getMaySupportMemoryTagging() { + return BaseConfig::getMaySupportMemoryTagging(); + } + +#define SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME) \ + template \ + using NAME = typename AllocatorConfig::Secondary::template NAME; +#include "allocator_config.def" + + struct CacheConfig { + // TODO: Pass this flag through template argument to remove this hard-coded + // function. 
+ static constexpr bool getMaySupportMemoryTagging() { + return BaseConfig::getMaySupportMemoryTagging(); + } + +#define SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT) \ + OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, Cache::NAME) \ + static constexpr removeConst::type get##NAME() { \ + return NAME##State::getValue(); \ + } +#include "allocator_config.def" + }; // CacheConfig +}; // SecondaryConfig + +#undef OPTIONAL_TEMPLATE +#undef OPTIONAL_TYPE_TEMPLATE + +} // namespace scudo + +#endif // SCUDO_ALLOCATOR_CONFIG_WRAPPER_H_ diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h index 9e1fd6d6dca3c..f4dd90aac6655 100644 --- a/compiler-rt/lib/scudo/standalone/combined.h +++ b/compiler-rt/lib/scudo/standalone/combined.h @@ -9,6 +9,7 @@ #ifndef SCUDO_COMBINED_H_ #define SCUDO_COMBINED_H_ +#include "allocator_config_wrapper.h" #include "atomic_helpers.h" #include "chunk.h" #include "common.h" @@ -47,11 +48,14 @@ namespace scudo { template class Allocator { public: - using PrimaryT = typename Config::template PrimaryT; - using SecondaryT = typename Config::template SecondaryT; + using AllocatorConfig = BaseConfig; + using PrimaryT = + typename AllocatorConfig::template PrimaryT>; + using SecondaryT = + typename AllocatorConfig::template SecondaryT>; using CacheT = typename PrimaryT::CacheT; typedef Allocator ThisT; - typedef typename Config::template TSDRegistryT TSDRegistryT; + typedef typename AllocatorConfig::template TSDRegistryT TSDRegistryT; void callPostInitCallback() { pthread_once(&PostInitNonce, PostInitCallback); @@ -72,7 +76,7 @@ class Allocator { Header.State = Chunk::State::Available; Chunk::storeHeader(Allocator.Cookie, Ptr, &Header); - if (allocatorSupportsMemoryTagging()) + if (allocatorSupportsMemoryTagging()) Ptr = untagPointer(Ptr); void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header); Cache.deallocate(Header.ClassId, BlockBegin); @@ -99,7 +103,8 @@ class Allocator { // Reset tag to 0 as this chunk may 
have been previously used for a tagged // user allocation. - if (UNLIKELY(useMemoryTagging(Allocator.Primary.Options.load()))) + if (UNLIKELY(useMemoryTagging( + Allocator.Primary.Options.load()))) storeTags(reinterpret_cast(Ptr), reinterpret_cast(Ptr) + sizeof(QuarantineBatch)); @@ -159,7 +164,7 @@ class Allocator { Primary.Options.set(OptionBit::DeallocTypeMismatch); if (getFlags()->delete_size_mismatch) Primary.Options.set(OptionBit::DeleteSizeMismatch); - if (allocatorSupportsMemoryTagging() && + if (allocatorSupportsMemoryTagging() && systemSupportsMemoryTagging()) Primary.Options.set(OptionBit::UseMemoryTagging); @@ -274,7 +279,7 @@ class Allocator { void drainCaches() { TSDRegistry.drainCaches(this); } ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) { - if (!allocatorSupportsMemoryTagging()) + if (!allocatorSupportsMemoryTagging()) return Ptr; auto UntaggedPtr = untagPointer(Ptr); if (UntaggedPtr != Ptr) @@ -286,7 +291,7 @@ class Allocator { } ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) { - if (!allocatorSupportsMemoryTagging()) + if (!allocatorSupportsMemoryTagging()) return Ptr; return addFixedTag(Ptr, 2); } @@ -419,7 +424,7 @@ class Allocator { // // When memory tagging is enabled, zeroing the contents is done as part of // setting the tag. 
- if (UNLIKELY(useMemoryTagging(Options))) { + if (UNLIKELY(useMemoryTagging(Options))) { uptr PrevUserPtr; Chunk::UnpackedHeader Header; const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId); @@ -501,7 +506,7 @@ class Allocator { } else { Block = addHeaderTag(Block); Ptr = addHeaderTag(Ptr); - if (UNLIKELY(useMemoryTagging(Options))) { + if (UNLIKELY(useMemoryTagging(Options))) { storeTags(reinterpret_cast(Block), reinterpret_cast(Ptr)); storeSecondaryAllocationStackMaybe(Options, Ptr, Size); } @@ -661,7 +666,7 @@ class Allocator { (reinterpret_cast(OldTaggedPtr) + NewSize)) & Chunk::SizeOrUnusedBytesMask; Chunk::storeHeader(Cookie, OldPtr, &Header); - if (UNLIKELY(useMemoryTagging(Options))) { + if (UNLIKELY(useMemoryTagging(Options))) { if (ClassId) { resizeTaggedChunk(reinterpret_cast(OldTaggedPtr) + OldSize, reinterpret_cast(OldTaggedPtr) + NewSize, @@ -764,8 +769,9 @@ class Allocator { Base = untagPointer(Base); const uptr From = Base; const uptr To = Base + Size; - bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging() && - systemSupportsMemoryTagging(); + bool MayHaveTaggedPrimary = + allocatorSupportsMemoryTagging() && + systemSupportsMemoryTagging(); auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback, Arg](uptr Block) { if (Block < From || Block >= To) @@ -786,9 +792,9 @@ class Allocator { } if (Header.State == Chunk::State::Allocated) { uptr TaggedChunk = Chunk; - if (allocatorSupportsMemoryTagging()) + if (allocatorSupportsMemoryTagging()) TaggedChunk = untagPointer(TaggedChunk); - if (useMemoryTagging(Primary.Options.load())) + if (useMemoryTagging(Primary.Options.load())) TaggedChunk = loadTag(Chunk); Callback(TaggedChunk, getSize(reinterpret_cast(Chunk), &Header), Arg); @@ -887,7 +893,7 @@ class Allocator { } bool useMemoryTaggingTestOnly() const { - return useMemoryTagging(Primary.Options.load()); + return useMemoryTagging(Primary.Options.load()); } void disableMemoryTagging() { // If we haven't been initialized yet, we need to 
initialize now in order to @@ -897,7 +903,7 @@ class Allocator { // callback), which may cause mappings to be created with memory tagging // enabled. TSDRegistry.initOnceMaybe(this); - if (allocatorSupportsMemoryTagging()) { + if (allocatorSupportsMemoryTagging()) { Secondary.disableMemoryTagging(); Primary.Options.clear(OptionBit::UseMemoryTagging); } @@ -983,7 +989,7 @@ class Allocator { // should not be able to crash the crash dumper (crash_dump on Android). // See also the get_error_info_fuzzer. *ErrorInfo = {}; - if (!allocatorSupportsMemoryTagging() || + if (!allocatorSupportsMemoryTagging() || MemoryAddr + MemorySize < MemoryAddr) return; @@ -1032,7 +1038,7 @@ class Allocator { static_assert(MinAlignment >= sizeof(Chunk::PackedHeader), "Minimal alignment must at least cover a chunk header."); - static_assert(!allocatorSupportsMemoryTagging() || + static_assert(!allocatorSupportsMemoryTagging() || MinAlignment >= archMemoryTagGranuleSize(), ""); @@ -1142,7 +1148,7 @@ class Allocator { const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes; if (LIKELY(Header->ClassId)) return SizeOrUnusedBytes; - if (allocatorSupportsMemoryTagging()) + if (allocatorSupportsMemoryTagging()) Ptr = untagPointer(const_cast(Ptr)); return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) - reinterpret_cast(Ptr) - SizeOrUnusedBytes; @@ -1162,12 +1168,12 @@ class Allocator { Header->State = Chunk::State::Available; else Header->State = Chunk::State::Quarantined; - Header->OriginOrWasZeroed = useMemoryTagging(Options) && + Header->OriginOrWasZeroed = useMemoryTagging(Options) && Header->ClassId && !TSDRegistry.getDisableMemInit(); Chunk::storeHeader(Cookie, Ptr, Header); - if (UNLIKELY(useMemoryTagging(Options))) { + if (UNLIKELY(useMemoryTagging(Options))) { u8 PrevTag = extractTag(reinterpret_cast(TaggedPtr)); storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size); if (Header->ClassId) { @@ -1184,7 +1190,7 @@ class Allocator { } } if (BypassQuarantine) { - if 
(allocatorSupportsMemoryTagging()) + if (allocatorSupportsMemoryTagging()) Ptr = untagPointer(Ptr); void *BlockBegin = getBlockBegin(Ptr, Header); const uptr ClassId = Header->ClassId; @@ -1201,7 +1207,7 @@ class Allocator { if (CacheDrained) Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal); } else { - if (UNLIKELY(useMemoryTagging(Options))) + if (UNLIKELY(useMemoryTagging(Options))) storeTags(reinterpret_cast(BlockBegin), reinterpret_cast(Ptr)); Secondary.deallocate(Options, BlockBegin); diff --git a/compiler-rt/lib/scudo/standalone/condition_variable.h b/compiler-rt/lib/scudo/standalone/condition_variable.h index 4afebdc9d04c2..3f16c86651e73 100644 --- a/compiler-rt/lib/scudo/standalone/condition_variable.h +++ b/compiler-rt/lib/scudo/standalone/condition_variable.h @@ -39,22 +39,6 @@ class ConditionVariableDummy } }; -template -struct ConditionVariableState { - static constexpr bool enabled() { return false; } - // This is only used for compilation purpose so that we won't end up having - // many conditional compilations. If you want to use `ConditionVariableDummy`, - // define `ConditionVariableT` in your allocator configuration. See - // allocator_config.h for more details. 
- using ConditionVariableT = ConditionVariableDummy; -}; - -template -struct ConditionVariableState { - static constexpr bool enabled() { return Config::UseConditionVariable; } - using ConditionVariableT = typename Config::ConditionVariableT; -}; - } // namespace scudo #endif // SCUDO_CONDITION_VARIABLE_H_ diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h index aaed2192ad752..1f6983e99404a 100644 --- a/compiler-rt/lib/scudo/standalone/memtag.h +++ b/compiler-rt/lib/scudo/standalone/memtag.h @@ -326,7 +326,7 @@ inline void *addFixedTag(void *Ptr, uptr Tag) { template inline constexpr bool allocatorSupportsMemoryTagging() { - return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging && + return archSupportsMemoryTagging() && Config::getMaySupportMemoryTagging() && (1 << SCUDO_MIN_ALIGNMENT_LOG) >= archMemoryTagGranuleSize(); } diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h index c86e75b8fd66a..1d8a77b73e5c2 100644 --- a/compiler-rt/lib/scudo/standalone/primary32.h +++ b/compiler-rt/lib/scudo/standalone/primary32.h @@ -43,14 +43,13 @@ namespace scudo { template class SizeClassAllocator32 { public: - typedef typename Config::Primary::CompactPtrT CompactPtrT; - typedef typename Config::Primary::SizeClassMap SizeClassMap; - static const uptr GroupSizeLog = Config::Primary::GroupSizeLog; + typedef typename Config::CompactPtrT CompactPtrT; + typedef typename Config::SizeClassMap SizeClassMap; + static const uptr GroupSizeLog = Config::getGroupSizeLog(); // The bytemap can only track UINT8_MAX - 1 classes. static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), ""); // Regions should be large enough to hold the largest Block. 
- static_assert((1UL << Config::Primary::RegionSizeLog) >= - SizeClassMap::MaxSize, + static_assert((1UL << Config::getRegionSizeLog()) >= SizeClassMap::MaxSize, ""); typedef SizeClassAllocator32 ThisT; typedef SizeClassAllocatorLocalCache CacheT; @@ -331,9 +330,9 @@ template class SizeClassAllocator32 { bool setOption(Option O, sptr Value) { if (O == Option::ReleaseInterval) { - const s32 Interval = Max(Min(static_cast(Value), - Config::Primary::MaxReleaseToOsIntervalMs), - Config::Primary::MinReleaseToOsIntervalMs); + const s32 Interval = Max( + Min(static_cast(Value), Config::getMaxReleaseToOsIntervalMs()), + Config::getMinReleaseToOsIntervalMs()); atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval); return true; } @@ -373,9 +372,9 @@ template class SizeClassAllocator32 { private: static const uptr NumClasses = SizeClassMap::NumClasses; - static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog; - static const uptr NumRegions = - SCUDO_MMAP_RANGE_SIZE >> Config::Primary::RegionSizeLog; + static const uptr RegionSize = 1UL << Config::getRegionSizeLog(); + static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> + Config::getRegionSizeLog(); static const u32 MaxNumBatches = SCUDO_ANDROID ? 
4U : 8U; typedef FlatByteMap ByteMap; @@ -408,7 +407,7 @@ template class SizeClassAllocator32 { static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, ""); uptr computeRegionId(uptr Mem) { - const uptr Id = Mem >> Config::Primary::RegionSizeLog; + const uptr Id = Mem >> Config::getRegionSizeLog(); CHECK_LT(Id, NumRegions); return Id; } @@ -437,7 +436,7 @@ template class SizeClassAllocator32 { unmap(reinterpret_cast(End), MapEnd - End); DCHECK_EQ(Region % RegionSize, 0U); - static_assert(Config::Primary::RegionSizeLog == GroupSizeLog, + static_assert(Config::getRegionSizeLog() == GroupSizeLog, "Memory group should be the same size as Region"); return Region; diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h index d89a2e6a4e5c8..f5e4ab57b4dfd 100644 --- a/compiler-rt/lib/scudo/standalone/primary64.h +++ b/compiler-rt/lib/scudo/standalone/primary64.h @@ -47,13 +47,12 @@ namespace scudo { template class SizeClassAllocator64 { public: - typedef typename Config::Primary::CompactPtrT CompactPtrT; - typedef typename Config::Primary::SizeClassMap SizeClassMap; - typedef typename ConditionVariableState< - typename Config::Primary>::ConditionVariableT ConditionVariableT; - static const uptr CompactPtrScale = Config::Primary::CompactPtrScale; - static const uptr RegionSizeLog = Config::Primary::RegionSizeLog; - static const uptr GroupSizeLog = Config::Primary::GroupSizeLog; + typedef typename Config::CompactPtrT CompactPtrT; + typedef typename Config::SizeClassMap SizeClassMap; + typedef typename Config::ConditionVariableT ConditionVariableT; + static const uptr CompactPtrScale = Config::getCompactPtrScale(); + static const uptr RegionSizeLog = Config::getRegionSizeLog(); + static const uptr GroupSizeLog = Config::getGroupSizeLog(); static_assert(RegionSizeLog >= GroupSizeLog, "Group size shouldn't be greater than the region size"); static const uptr GroupScale = GroupSizeLog - CompactPtrScale; @@ -74,7 +73,7 @@ 
template class SizeClassAllocator64 { static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; } static bool conditionVariableEnabled() { - return ConditionVariableState::enabled(); + return Config::hasConditionVariableT(); } void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS { @@ -135,7 +134,7 @@ template class SizeClassAllocator64 { // The actual start of a region is offset by a random number of pages // when PrimaryEnableRandomOffset is set. Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) + - (Config::Primary::EnableRandomOffset + (Config::getEnableRandomOffset() ? ((getRandomModN(&Seed, 16) + 1) * PageSize) : 0); Region->RandState = getRandomU32(&Seed); @@ -400,9 +399,9 @@ template class SizeClassAllocator64 { bool setOption(Option O, sptr Value) { if (O == Option::ReleaseInterval) { - const s32 Interval = Max(Min(static_cast(Value), - Config::Primary::MaxReleaseToOsIntervalMs), - Config::Primary::MinReleaseToOsIntervalMs); + const s32 Interval = Max( + Min(static_cast(Value), Config::getMaxReleaseToOsIntervalMs()), + Config::getMinReleaseToOsIntervalMs()); atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval); return true; } @@ -516,7 +515,7 @@ template class SizeClassAllocator64 { static const uptr NumClasses = SizeClassMap::NumClasses; static const uptr PrimarySize = RegionSize * NumClasses; - static const uptr MapSizeIncrement = Config::Primary::MapSizeIncrement; + static const uptr MapSizeIncrement = Config::getMapSizeIncrement(); // Fill at most this number of batches from the newly map'd memory. static const u32 MaxNumBatches = SCUDO_ANDROID ? 
4U : 8U; diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h index 732fd307ed2f4..202c55cc1a92b 100644 --- a/compiler-rt/lib/scudo/standalone/secondary.h +++ b/compiler-rt/lib/scudo/standalone/secondary.h @@ -173,8 +173,6 @@ template class NonZeroLengthArray { template class MapAllocatorCache { public: - using CacheConfig = typename Config::Secondary::Cache; - void getStats(ScopedString *Str) { ScopedLock L(Mutex); uptr Integral; @@ -199,16 +197,16 @@ template class MapAllocatorCache { } // Ensure the default maximum specified fits the array. - static_assert(CacheConfig::DefaultMaxEntriesCount <= - CacheConfig::EntriesArraySize, + static_assert(Config::getDefaultMaxEntriesCount() <= + Config::getEntriesArraySize(), ""); void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS { DCHECK_EQ(EntriesCount, 0U); setOption(Option::MaxCacheEntriesCount, - static_cast(CacheConfig::DefaultMaxEntriesCount)); + static_cast(Config::getDefaultMaxEntriesCount())); setOption(Option::MaxCacheEntrySize, - static_cast(CacheConfig::DefaultMaxEntrySize)); + static_cast(Config::getDefaultMaxEntrySize())); setOption(Option::ReleaseInterval, static_cast(ReleaseToOsInterval)); } @@ -253,9 +251,9 @@ template class MapAllocatorCache { // just unmap it. 
break; } - if (CacheConfig::QuarantineSize && useMemoryTagging(Options)) { + if (Config::getQuarantineSize() && useMemoryTagging(Options)) { QuarantinePos = - (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u); + (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u); if (!Quarantine[QuarantinePos].isValid()) { Quarantine[QuarantinePos] = Entry; return; @@ -382,14 +380,14 @@ template class MapAllocatorCache { bool setOption(Option O, sptr Value) { if (O == Option::ReleaseInterval) { const s32 Interval = Max( - Min(static_cast(Value), CacheConfig::MaxReleaseToOsIntervalMs), - CacheConfig::MinReleaseToOsIntervalMs); + Min(static_cast(Value), Config::getMaxReleaseToOsIntervalMs()), + Config::getMinReleaseToOsIntervalMs()); atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval); return true; } if (O == Option::MaxCacheEntriesCount) { const u32 MaxCount = static_cast(Value); - if (MaxCount > CacheConfig::EntriesArraySize) + if (MaxCount > Config::getEntriesArraySize()) return false; atomic_store_relaxed(&MaxEntriesCount, MaxCount); return true; @@ -406,7 +404,7 @@ template class MapAllocatorCache { void disableMemoryTagging() EXCLUDES(Mutex) { ScopedLock L(Mutex); - for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) { + for (u32 I = 0; I != Config::getQuarantineSize(); ++I) { if (Quarantine[I].isValid()) { MemMapT &MemMap = Quarantine[I].MemMap; MemMap.unmap(MemMap.getBase(), MemMap.getCapacity()); @@ -431,11 +429,11 @@ template class MapAllocatorCache { private: void empty() { - MemMapT MapInfo[CacheConfig::EntriesArraySize]; + MemMapT MapInfo[Config::getEntriesArraySize()]; uptr N = 0; { ScopedLock L(Mutex); - for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) { + for (uptr I = 0; I < Config::getEntriesArraySize(); I++) { if (!Entries[I].isValid()) continue; MapInfo[N] = Entries[I].MemMap; @@ -468,9 +466,9 @@ template class MapAllocatorCache { if (!EntriesCount || OldestTime == 0 || OldestTime > Time) return; OldestTime = 0; - for (uptr I = 0; I 
< CacheConfig::QuarantineSize; I++) + for (uptr I = 0; I < Config::getQuarantineSize(); I++) releaseIfOlderThan(Quarantine[I], Time); - for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) + for (uptr I = 0; I < Config::getEntriesArraySize(); I++) releaseIfOlderThan(Entries[I], Time); } @@ -485,8 +483,8 @@ template class MapAllocatorCache { u32 CallsToRetrieve GUARDED_BY(Mutex) = 0; u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0; - CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {}; - NonZeroLengthArray + CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {}; + NonZeroLengthArray Quarantine GUARDED_BY(Mutex) = {}; }; @@ -555,7 +553,7 @@ template class MapAllocator { void getStats(ScopedString *Str); private: - typename Config::Secondary::template CacheT Cache; + typename Config::template CacheT Cache; mutable HybridMutex Mutex; DoublyLinkedList InUseBlocks GUARDED_BY(Mutex); diff --git a/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt b/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt index ac92805872f92..1786756fa5ea6 100644 --- a/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt +++ b/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt @@ -90,6 +90,7 @@ macro(add_scudo_unittest testname) endmacro() set(SCUDO_UNIT_TEST_SOURCES + allocator_config_test.cpp atomic_test.cpp bytemap_test.cpp checksum_test.cpp diff --git a/compiler-rt/lib/scudo/standalone/tests/allocator_config_test.cpp b/compiler-rt/lib/scudo/standalone/tests/allocator_config_test.cpp new file mode 100644 index 0000000000000..4c4ceb832e27b --- /dev/null +++ b/compiler-rt/lib/scudo/standalone/tests/allocator_config_test.cpp @@ -0,0 +1,119 @@ +//===-- allocator_config_test.cpp -------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "tests/scudo_unit_test.h" + +#include "allocator_config.h" +#include "allocator_config_wrapper.h" +#include "common.h" +#include "secondary.h" + +#include + +struct TestBaseConfig { + template using TSDRegistryT = void; + template using PrimaryT = void; + template using SecondaryT = void; +}; + +struct TestBaseConfigEnableOptionalFlag : public TestBaseConfig { + static const bool MaySupportMemoryTagging = true; + // Use the getter so the test does not take the address of the static const + // variable (taking the address would require an out-of-class definition). + static bool getMaySupportMemoryTagging() { return MaySupportMemoryTagging; } +}; + +struct TestBasePrimaryConfig { + using SizeClassMap = void; + static const scudo::uptr RegionSizeLog = 18U; + static const scudo::uptr GroupSizeLog = 18U; + static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN; + static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX; + typedef scudo::uptr CompactPtrT; + static const scudo::uptr CompactPtrScale = 0; + static const scudo::uptr MapSizeIncrement = 1UL << 18; +}; + +struct TestPrimaryConfig : public TestBaseConfig { + struct Primary : TestBasePrimaryConfig {}; +}; + +struct TestPrimaryConfigEnableOptionalFlag : public TestBaseConfig { + struct Primary : TestBasePrimaryConfig { + static const bool EnableRandomOffset = true; + static bool getEnableRandomOffset() { return EnableRandomOffset; } + }; +}; + +struct TestPrimaryConfigEnableOptionalType : public TestBaseConfig { + struct DummyConditionVariable {}; + + struct Primary : TestBasePrimaryConfig { + using ConditionVariableT = DummyConditionVariable; + }; +}; + +struct TestSecondaryConfig : public TestPrimaryConfig { + struct Secondary { + template + using CacheT = scudo::MapAllocatorNoCache; + }; +}; + +struct TestSecondaryCacheConfigEnableOptionalFlag : public 
TestPrimaryConfig { + struct Secondary { + struct Cache { + static const scudo::u32 EntriesArraySize = 256U; + static scudo::u32 getEntriesArraySize() { return EntriesArraySize; } + }; + template using CacheT = scudo::MapAllocatorCache; + }; +}; + +TEST(ScudoAllocatorConfigTest, VerifyOptionalFlags) { + // Test the top level allocator optional config. + // + // `MaySupportMemoryTagging` is default off. + EXPECT_FALSE(scudo::BaseConfig::getMaySupportMemoryTagging()); + EXPECT_EQ(scudo::BaseConfig< + TestBaseConfigEnableOptionalFlag>::getMaySupportMemoryTagging(), + TestBaseConfigEnableOptionalFlag::getMaySupportMemoryTagging()); + + // Test primary optional config. + // + // `EnableRandomOffset` is default off. + EXPECT_FALSE( + scudo::PrimaryConfig::getEnableRandomOffset()); + EXPECT_EQ( + scudo::PrimaryConfig< + TestPrimaryConfigEnableOptionalFlag>::getEnableRandomOffset(), + TestPrimaryConfigEnableOptionalFlag::Primary::getEnableRandomOffset()); + + // `ConditionVariableT` is default off. + EXPECT_FALSE( + scudo::PrimaryConfig::hasConditionVariableT()); + EXPECT_TRUE(scudo::PrimaryConfig< + TestPrimaryConfigEnableOptionalType>::hasConditionVariableT()); + EXPECT_TRUE((std::is_same_v< + typename scudo::PrimaryConfig< + TestPrimaryConfigEnableOptionalType>::ConditionVariableT, + typename TestPrimaryConfigEnableOptionalType::Primary:: + ConditionVariableT>)); + + // Test secondary cache optional config. + using NoCacheConfig = + scudo::SecondaryConfig::CacheConfig; + // `EntriesArraySize` is default 0. 
+ EXPECT_EQ(NoCacheConfig::getEntriesArraySize(), 0U); + + using CacheConfig = scudo::SecondaryConfig< + TestSecondaryCacheConfigEnableOptionalFlag>::CacheConfig; + EXPECT_EQ(CacheConfig::getEntriesArraySize(), + TestSecondaryCacheConfigEnableOptionalFlag::Secondary::Cache:: + getEntriesArraySize()); +} diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp index 13d627b116809..6a311adc55e4b 100644 --- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp @@ -190,7 +190,6 @@ struct TestConditionVariableConfig { #endif static const scudo::s32 MinReleaseToOsIntervalMs = 1000; static const scudo::s32 MaxReleaseToOsIntervalMs = 1000; - static const bool UseConditionVariable = true; #if SCUDO_LINUX using ConditionVariableT = scudo::ConditionVariableLinux; #else diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp index f64a5143b30d4..683ce3e596596 100644 --- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp @@ -9,6 +9,7 @@ #include "tests/scudo_unit_test.h" #include "allocator_config.h" +#include "allocator_config_wrapper.h" #include "condition_variable.h" #include "primary32.h" #include "primary64.h" @@ -29,6 +30,9 @@ template struct TestConfig1 { static const bool MaySupportMemoryTagging = false; + template using TSDRegistryT = void; + template using PrimaryT = void; + template using SecondaryT = void; struct Primary { using SizeClassMap = SizeClassMapT; @@ -45,6 +49,9 @@ template struct TestConfig1 { template struct TestConfig2 { static const bool MaySupportMemoryTagging = false; + template using TSDRegistryT = void; + template using PrimaryT = void; + template using SecondaryT = void; struct Primary { using SizeClassMap = SizeClassMapT; @@ -66,6 +73,9 @@ template struct TestConfig2 { 
template struct TestConfig3 { static const bool MaySupportMemoryTagging = true; + template using TSDRegistryT = void; + template using PrimaryT = void; + template using SecondaryT = void; struct Primary { using SizeClassMap = SizeClassMapT; @@ -87,6 +97,9 @@ template struct TestConfig3 { template struct TestConfig4 { static const bool MaySupportMemoryTagging = true; + template using TSDRegistryT = void; + template using PrimaryT = void; + template using SecondaryT = void; struct Primary { using SizeClassMap = SizeClassMapT; @@ -109,6 +122,9 @@ template struct TestConfig4 { // This is the only test config that enables the condition variable. template struct TestConfig5 { static const bool MaySupportMemoryTagging = true; + template using TSDRegistryT = void; + template using PrimaryT = void; + template using SecondaryT = void; struct Primary { using SizeClassMap = SizeClassMapT; @@ -125,7 +141,6 @@ template struct TestConfig5 { typedef scudo::u32 CompactPtrT; static const bool EnableRandomOffset = true; static const scudo::uptr MapSizeIncrement = 1UL << 18; - static const bool UseConditionVariable = true; #if SCUDO_LINUX using ConditionVariableT = scudo::ConditionVariableLinux; #else @@ -139,10 +154,12 @@ struct Config : public BaseConfig {}; template