diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 06ed28221eb19..b17acc71f8920 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -1178,9 +1178,16 @@ class Allocator {
     if (LIKELY(ClassId)) {
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      TSD->getCache().deallocate(ClassId, BlockBegin);
+      const bool CacheDrained =
+          TSD->getCache().deallocate(ClassId, BlockBegin);
       if (UnlockRequired)
         TSD->unlock();
+      // When we have drained some blocks back to the Primary from TSD, that
+      // implies that we may have the chance to release some pages as well.
+      // Note that in order not to block other thread's accessing the TSD,
+      // release the TSD first then try the page release.
+      if (CacheDrained)
+        Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
     } else {
       if (UNLIKELY(useMemoryTagging(Options)))
         storeTags(reinterpret_cast<uptr>(BlockBegin),
diff --git a/compiler-rt/lib/scudo/standalone/local_cache.h b/compiler-rt/lib/scudo/standalone/local_cache.h
index a3eca744b8f68..6d36a1c399ff1 100644
--- a/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -113,13 +113,16 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
     return Allocator->decompactPtr(ClassId, CompactP);
   }
 
-  void deallocate(uptr ClassId, void *P) {
+  bool deallocate(uptr ClassId, void *P) {
     CHECK_LT(ClassId, NumClasses);
     PerClass *C = &PerClassArray[ClassId];
     // We still have to initialize the cache in the event that the first heap
     // operation in a thread is a deallocation.
     initCacheMaybe(C);
-    if (C->Count == C->MaxCount)
+
+    // If the cache is full, drain half of blocks back to the main allocator.
+    const bool NeedToDrainCache = C->Count == C->MaxCount;
+    if (NeedToDrainCache)
       drain(C, ClassId);
     // See comment in allocate() about memory accesses.
     const uptr ClassSize = C->ClassSize;
@@ -127,6 +130,8 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
         Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
     Stats.sub(StatAllocated, ClassSize);
     Stats.add(StatFree, ClassSize);
+
+    return NeedToDrainCache;
   }
 
   bool isEmpty() const {
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index f6891dab0b026..c8dd6977b9ab0 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -232,9 +232,6 @@ template <typename Config> class SizeClassAllocator32 {
 
     ScopedLock L(Sci->Mutex);
     pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
-
-    if (ClassId != SizeClassMap::BatchClassId)
-      releaseToOSMaybe(Sci, ClassId);
   }
 
   void disable() NO_THREAD_SAFETY_ANALYSIS {
@@ -323,6 +320,14 @@ template <typename Config> class SizeClassAllocator32 {
     return true;
   }
 
+  uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
+    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+    // TODO: Once we have separate locks like primary64, we may consider using
+    // tryLock() as well.
+    ScopedLock L(Sci->Mutex);
+    return releaseToOSMaybe(Sci, ClassId, ReleaseType);
+  }
+
   uptr releaseToOS(ReleaseToOS ReleaseType) {
     uptr TotalReleasedBytes = 0;
     for (uptr I = 0; I < NumClasses; I++) {
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 8571a2dfbae50..dd58ebabba0b3 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -331,16 +331,6 @@ template <typename Config> class SizeClassAllocator64 {
       ScopedLock L(Region->FLLock);
       pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
     }
-
-    // Only non-BatchClass will be here, try to release the pages in the region.
-
-    // Note that the tryLock() may fail spuriously, given that it should rarely
-    // happen and page releasing is fine to skip, we don't take certain
-    // approaches to ensure one page release is done.
-    if (Region->MMLock.tryLock()) {
-      releaseToOSMaybe(Region, ClassId);
-      Region->MMLock.unlock();
-    }
   }
 
   void disable() NO_THREAD_SAFETY_ANALYSIS {
@@ -426,6 +416,19 @@ template <typename Config> class SizeClassAllocator64 {
     return true;
   }
 
+  uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
+    RegionInfo *Region = getRegionInfo(ClassId);
+    // Note that the tryLock() may fail spuriously, given that it should rarely
+    // happen and page releasing is fine to skip, we don't take certain
+    // approaches to ensure one page release is done.
+    if (Region->MMLock.tryLock()) {
+      uptr BytesReleased = releaseToOSMaybe(Region, ClassId, ReleaseType);
+      Region->MMLock.unlock();
+      return BytesReleased;
+    }
+    return 0;
+  }
+
   uptr releaseToOS(ReleaseToOS ReleaseType) {
     uptr TotalReleasedBytes = 0;
     for (uptr I = 0; I < NumClasses; I++) {