diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
index 12fe5b98bea005..11c2e945afe5af 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
@@ -59,7 +59,7 @@ struct ChainedOriginDepotNode {
 
 }  // namespace
 
-static PersistentAllocator allocator;
+static PersistentAllocator<ChainedOriginDepotNode> allocator;
 
 static StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;
 
@@ -71,8 +71,7 @@ uptr ChainedOriginDepotNode::allocated() { return allocator.allocated(); }
 
 ChainedOriginDepotNode *ChainedOriginDepotNode::allocate(
     const args_type &args) {
-  return static_cast<ChainedOriginDepotNode *>(
-      allocator.alloc(sizeof(ChainedOriginDepotNode)));
+  return allocator.alloc();
 }
 
 /* This is murmur2 hash for the 64->32 bit case.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h
index e943fa5da07f1a..2c51fb46e1232b 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h
@@ -20,9 +20,10 @@
 
 namespace __sanitizer {
 
+template <typename T>
 class PersistentAllocator {
  public:
-  void *alloc(uptr size);
+  T *alloc(uptr count = 1);
   uptr allocated() const {
     SpinMutexLock l(&mtx);
     return atomic_load_relaxed(&mapped_size) +
@@ -30,42 +31,47 @@ class PersistentAllocator {
   }
 
  private:
-  void *tryAlloc(uptr size);
-  void *refillAndAlloc(uptr size);
+  T *tryAlloc(uptr count);
+  T *refillAndAlloc(uptr count);
   mutable StaticSpinMutex mtx;  // Protects alloc of new blocks.
   atomic_uintptr_t region_pos;  // Region allocator for Node's.
   atomic_uintptr_t region_end;
   atomic_uintptr_t mapped_size;
 };
 
-inline void *PersistentAllocator::tryAlloc(uptr size) {
+template <typename T>
+inline T *PersistentAllocator<T>::tryAlloc(uptr count) {
   // Optimisic lock-free allocation, essentially try to bump the region ptr.
   for (;;) {
     uptr cmp = atomic_load(&region_pos, memory_order_acquire);
     uptr end = atomic_load(&region_end, memory_order_acquire);
+    uptr size = count * sizeof(T);
     if (cmp == 0 || cmp + size > end)
       return nullptr;
     if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                      memory_order_acquire))
-      return (void *)cmp;
+      return reinterpret_cast<T *>(cmp);
   }
 }
 
-inline void *PersistentAllocator::alloc(uptr size) {
+template <typename T>
+inline T *PersistentAllocator<T>::alloc(uptr count) {
   // First, try to allocate optimisitically.
-  void *s = tryAlloc(size);
+  T *s = tryAlloc(count);
   if (LIKELY(s))
     return s;
-  return refillAndAlloc(size);
+  return refillAndAlloc(count);
 }
 
-inline void *PersistentAllocator::refillAndAlloc(uptr size) {
+template <typename T>
+inline T *PersistentAllocator<T>::refillAndAlloc(uptr count) {
   // If failed, lock, retry and alloc new superblock.
   SpinMutexLock l(&mtx);
   for (;;) {
-    void *s = tryAlloc(size);
+    T *s = tryAlloc(count);
     if (s)
       return s;
     atomic_store(&region_pos, 0, memory_order_relaxed);
+    uptr size = count * sizeof(T);
     uptr allocsz = 64 * 1024;
     if (allocsz < size)
       allocsz = size;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index d478f15d4f49ef..a067463f2f67d6 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -19,8 +19,8 @@
 
 namespace __sanitizer {
 
-static PersistentAllocator allocator;
-static PersistentAllocator traceAllocator;
+static PersistentAllocator<StackDepotNode> allocator;
+static PersistentAllocator<uptr> traceAllocator;
 
 struct StackDepotNode {
   using hash_type = u64;
@@ -43,7 +43,7 @@ struct StackDepotNode {
     return allocator.allocated() + traceAllocator.allocated();
   }
   static StackDepotNode *allocate(const args_type &args) {
-    return (StackDepotNode *)allocator.alloc(sizeof(StackDepotNode));
+    return allocator.alloc();
   }
   static hash_type hash(const args_type &args) {
     MurMur2Hash64Builder H(args.size * sizeof(uptr));
@@ -59,7 +59,7 @@ struct StackDepotNode {
     atomic_store(&tag_and_use_count, args.tag << kUseCountBits,
                  memory_order_relaxed);
     stack_hash = hash;
-    stack_trace = (uptr *)traceAllocator.alloc((args.size + 1) * sizeof(uptr));
+    stack_trace = traceAllocator.alloc(args.size + 1);
     *stack_trace = args.size;
     internal_memcpy(stack_trace + 1, args.trace, args.size * sizeof(uptr));
   }
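The net effect of the patch: `PersistentAllocator` becomes `PersistentAllocator<T>`, call sites ask for a count of typed elements instead of a byte size, and the `static_cast`/C-style casts at the depot call sites disappear. Below is a minimal, self-contained sketch of that typed bump-pointer pattern; `TypedBumpAllocator`, `Node`, and the `main` driver are hypothetical stand-ins, not part of compiler-rt, and the sketch omits the real class's locked region refill (`refillAndAlloc`).

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdlib>

// Hypothetical stand-in for the patched PersistentAllocator<T>: a lock-free
// bump-pointer allocator over one fixed region, no free() or realloc().
// The real class refills its region under a spin lock; this sketch does not.
template <typename T>
class TypedBumpAllocator {
 public:
  explicit TypedBumpAllocator(std::size_t bytes) {
    auto begin = reinterpret_cast<std::uintptr_t>(std::malloc(bytes));
    assert(begin != 0);
    pos_.store(begin, std::memory_order_relaxed);
    end_ = begin + bytes;
  }

  // A count of T's rather than a byte size: the allocator multiplies by
  // sizeof(T) internally and hands back a typed pointer, so call sites
  // need no casts (compare the allocate() changes in the patch).
  T *alloc(std::size_t count = 1) {
    for (;;) {
      std::uintptr_t cmp = pos_.load(std::memory_order_acquire);
      std::uintptr_t size = count * sizeof(T);
      if (cmp + size > end_)
        return nullptr;  // out of space; the real code refills here
      if (pos_.compare_exchange_weak(cmp, cmp + size,
                                     std::memory_order_acquire))
        return reinterpret_cast<T *>(cmp);
    }
  }

 private:
  std::atomic<std::uintptr_t> pos_;
  std::uintptr_t end_;
};

struct Node {
  std::uint64_t hash;
};

int main() {
  TypedBumpAllocator<Node> nodes(1 << 20);
  Node *n = nodes.alloc();  // one node, already typed

  TypedBumpAllocator<std::uint64_t> traces(1 << 20);
  std::uint64_t *frames = traces.alloc(16);  // an array of 16 elements

  assert(n && frames);
  n->hash = 42;
  frames[0] = 15;
  return 0;
}
```

The `count` parameter defaulting to 1 is what lets the node allocators collapse to a bare `allocator.alloc()`, while `traceAllocator` still sizes variable-length trace arrays with `alloc(args.size + 1)`.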