diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index f3f91c40e7a03..286e5d332f57c 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -206,11 +206,13 @@ class MapAllocatorCache {
     computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                       &Fractional);
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-    Str->append(
-        "Stats: MapAllocatorCache: EntriesCount: %zu, "
-        "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
-        LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
-        atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
+    Str->append("Stats: MapAllocatorCache: EntriesCount: %zu, "
+                "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsSkips: "
+                "%zu, ReleaseToOsIntervalMs = %d\n",
+                LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
+                atomic_load_relaxed(&MaxEntrySize),
+                atomic_load_relaxed(&ReleaseToOsSkips),
+                Interval >= 0 ? Interval : -1);
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
@@ -343,8 +345,15 @@ class MapAllocatorCache {
       unmapCallBack(EvictMemMap);
 
     if (Interval >= 0) {
-      // TODO: Add ReleaseToOS logic to LRU algorithm
-      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
+      // It is very likely that multiple threads trying to do a release at the
+      // same time will not actually release any extra elements. Therefore,
+      // let any other thread continue, skipping the release.
+      if (Mutex.tryLock()) {
+        // TODO: Add ReleaseToOS logic to LRU algorithm
+        releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
+        Mutex.unlock();
+      } else
+        atomic_fetch_add(&ReleaseToOsSkips, 1U, memory_order_relaxed);
     }
   }
 
@@ -488,7 +497,12 @@ class MapAllocatorCache {
     return true;
   }
 
-  void releaseToOS() { releaseOlderThan(UINT64_MAX); }
+  void releaseToOS() EXCLUDES(Mutex) {
+    // Since this is a request to release everything, always wait for the
+    // lock so that we guarantee all entries are released after this call.
+    ScopedLock L(Mutex);
+    releaseOlderThan(UINT64_MAX);
+  }
 
   void disableMemoryTagging() EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
@@ -554,8 +568,7 @@ class MapAllocatorCache {
     Entry.Time = 0;
   }
 
-  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
-    ScopedLock L(Mutex);
+  void releaseOlderThan(u64 Time) REQUIRES(Mutex) {
     if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
       return;
     OldestTime = 0;
@@ -573,6 +586,7 @@ class MapAllocatorCache {
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
+  atomic_uptr ReleaseToOsSkips = {};
 
   CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
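
For readers outside the scudo tree, the locking pattern this patch introduces can be sketched with standard C++ primitives. The sketch below is an illustration only, assuming std::mutex and std::atomic in place of scudo's own HybridMutex and atomic_uptr; the Cache class and its maybeRelease/releaseAll methods are hypothetical stand-ins, not scudo's API.

// Minimal sketch of the try-lock-and-skip release pattern, assuming
// std::mutex and std::atomic in place of scudo's HybridMutex and
// atomic_uptr. Hypothetical illustration, not scudo code.
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class Cache {
public:
  // Periodic release path: if another thread already holds the lock, it
  // is very likely releasing the same entries, so skip rather than queue
  // up behind it, and count the skip for the stats output.
  void maybeRelease(uint64_t Time) {
    if (Mu.try_lock()) {
      releaseOlderThan(Time);
      Mu.unlock();
    } else {
      Skips.fetch_add(1, std::memory_order_relaxed);
    }
  }

  // Full release path: callers expect every entry to be released once
  // this returns, so always block on the lock rather than skipping.
  void releaseAll() {
    std::lock_guard<std::mutex> L(Mu);
    releaseOlderThan(UINT64_MAX);
  }

  uint64_t skips() const { return Skips.load(std::memory_order_relaxed); }

private:
  // Stand-in for the real release work; must only run with Mu held.
  void releaseOlderThan(uint64_t /*Time*/) {}

  std::mutex Mu;
  std::atomic<uint64_t> Skips{0};
};

int main() {
  Cache C;
  std::vector<std::thread> Threads;
  // Many threads racing on the periodic path: one wins the lock at a
  // time, the rest skip and bump the counter.
  for (int I = 0; I < 8; ++I)
    Threads.emplace_back([&C] {
      for (int J = 0; J < 1000; ++J)
        C.maybeRelease(/*Time=*/static_cast<uint64_t>(J));
    });
  for (auto &T : Threads)
    T.join();
  C.releaseAll(); // Always releases everything, never skips.
  std::printf("ReleaseToOsSkips: %llu\n",
              static_cast<unsigned long long>(C.skips()));
  return 0;
}

The split mirrors the patch: the interval-driven release path tolerates being skipped under contention, since the thread holding the lock is already doing the same work, while releaseToOS must block for the lock because its callers rely on all entries being released when it returns.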