Skip to content

Commit

Permalink
[scudo] Add a method to force release everything.
Browse files Browse the repository at this point in the history
The Force flag to releaseToOSMaybe does not release everything,
since releasing everything is an expensive operation. Replace the
boolean flag with a three-state release type: Normal, Force,
ForceAll. Force behaves the same as setting Force to true did
before this change. ForceAll releases everything regardless of how
much time it takes or how much there is to release.

In addition, add a new mallopt option (M_PURGE_ALL) that calls the
release function with the ForceAll flag set.

Reviewed By: Chia-hungDuan

Differential Revision: https://reviews.llvm.org/D146106
  • Loading branch information
cferris1000 committed Mar 16, 2023
1 parent e95d9c9 commit 8aaefa9
Show file tree
Hide file tree
Showing 8 changed files with 45 additions and 25 deletions.
4 changes: 2 additions & 2 deletions compiler-rt/lib/scudo/standalone/combined.h
Expand Up @@ -745,9 +745,9 @@ class Allocator {
Str.output();
}

void releaseToOS() {
void releaseToOS(ReleaseToOS ReleaseType) {
initThreadMaybe();
Primary.releaseToOS();
Primary.releaseToOS(ReleaseType);
Secondary.releaseToOS();
}

Expand Down
7 changes: 7 additions & 0 deletions compiler-rt/lib/scudo/standalone/common.h
Expand Up @@ -215,6 +215,13 @@ enum class Option : u8 {
MaxTSDsCount, // Number of usable TSDs for the shared registry.
};

// Policy passed to releaseToOS()/releaseToOSMaybe() controlling how
// aggressively free pages are returned to the OS. Normal applies the
// density and release-interval heuristics; Force and ForceAll bypass them
// to differing degrees (see the primary allocators' releaseToOSMaybe).
enum class ReleaseToOS : u8 {
Normal, // Follow the normal rules for releasing pages to the OS
Force, // Force release pages to the OS, but avoid cases that take too long.
ForceAll, // Force release every page possible regardless of how long it will
// take.
};

constexpr unsigned char PatternFillByte = 0xAB;

enum FillContentsMode {
Expand Down
4 changes: 4 additions & 0 deletions compiler-rt/lib/scudo/standalone/include/scudo/interface.h
Expand Up @@ -118,6 +118,10 @@ size_t __scudo_get_ring_buffer_size(void);
#define M_PURGE -101
#endif

// mallopt() option: purge every releasable page no matter the cost
// (mapped to ReleaseToOS::ForceAll in the C wrappers). Guarded so a
// platform-provided definition (e.g. Android's <malloc.h>) takes precedence.
#ifndef M_PURGE_ALL
#define M_PURGE_ALL -104
#endif

// Tune the allocator's choice of memory tags to make it more likely that
// a certain class of memory errors will be detected. The value argument should
// be one of the M_MEMTAG_TUNING_* constants below.
Expand Down
15 changes: 9 additions & 6 deletions compiler-rt/lib/scudo/standalone/primary32.h
Expand Up @@ -281,14 +281,14 @@ template <typename Config> class SizeClassAllocator32 {
return true;
}

uptr releaseToOS() {
uptr releaseToOS(ReleaseToOS ReleaseType) {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
SizeClassInfo *Sci = getSizeClassInfo(I);
ScopedLock L(Sci->Mutex);
TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
}
return TotalReleasedBytes;
}
Expand Down Expand Up @@ -727,7 +727,8 @@ template <typename Config> class SizeClassAllocator32 {
}

NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
bool Force = false) REQUIRES(Sci->Mutex) {
ReleaseToOS ReleaseType = ReleaseToOS::Normal)
REQUIRES(Sci->Mutex) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();

Expand All @@ -743,16 +744,18 @@ template <typename Config> class SizeClassAllocator32 {
if (BytesPushed < PageSize)
return 0; // Nothing new to release.

const bool CheckDensity = BlockSize < PageSize / 16U;
const bool CheckDensity =
BlockSize < PageSize / 16U && ReleaseType != ReleaseToOS::ForceAll;
// Releasing smaller blocks is expensive, so we want to make sure that a
// significant amount of bytes are free, and that there has been a good
// amount of batches pushed to the freelist before attempting to release.
if (CheckDensity) {
if (!Force && BytesPushed < Sci->AllocatedUser / 16U)
if (ReleaseType == ReleaseToOS::Normal &&
BytesPushed < Sci->AllocatedUser / 16U)
return 0;
}

if (!Force) {
if (ReleaseType == ReleaseToOS::Normal) {
const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
if (IntervalMs < 0)
return 0;
Expand Down
15 changes: 9 additions & 6 deletions compiler-rt/lib/scudo/standalone/primary64.h
Expand Up @@ -321,14 +321,14 @@ template <typename Config> class SizeClassAllocator64 {
return true;
}

uptr releaseToOS() {
uptr releaseToOS(ReleaseToOS ReleaseType) {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
RegionInfo *Region = getRegionInfo(I);
ScopedLock L(Region->Mutex);
TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
TotalReleasedBytes += releaseToOSMaybe(Region, I, ReleaseType);
}
return TotalReleasedBytes;
}
Expand Down Expand Up @@ -805,7 +805,8 @@ template <typename Config> class SizeClassAllocator64 {
}

NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
bool Force = false) REQUIRES(Region->Mutex) {
ReleaseToOS ReleaseType = ReleaseToOS::Normal)
REQUIRES(Region->Mutex) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();

Expand All @@ -821,16 +822,18 @@ template <typename Config> class SizeClassAllocator64 {
if (BytesPushed < PageSize)
return 0; // Nothing new to release.

const bool CheckDensity = BlockSize < PageSize / 16U;
const bool CheckDensity =
BlockSize < PageSize / 16U && ReleaseType != ReleaseToOS::ForceAll;
// Releasing smaller blocks is expensive, so we want to make sure that a
// significant amount of bytes are free, and that there has been a good
// amount of batches pushed to the freelist before attempting to release.
if (CheckDensity) {
if (!Force && BytesPushed < Region->AllocatedUser / 16U)
if (ReleaseType == ReleaseToOS::Normal &&
BytesPushed < Region->AllocatedUser / 16U)
return 0;
}

if (!Force) {
if (ReleaseType == ReleaseToOS::Normal) {
const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
if (IntervalMs < 0)
return 0;
Expand Down
10 changes: 5 additions & 5 deletions compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
Expand Up @@ -92,7 +92,7 @@ template <class TypeParam> struct ScudoCombinedTest : public Test {
Allocator = std::make_unique<AllocatorT>();
}
~ScudoCombinedTest() {
Allocator->releaseToOS();
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
UseQuarantine = true;
}

Expand Down Expand Up @@ -412,7 +412,7 @@ SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
reinterpret_cast<char *>(P)[2048] = 0xaa;
Allocator->deallocate(P, Origin);

Allocator->releaseToOS();
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}
}

Expand Down Expand Up @@ -488,7 +488,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
}
for (auto &T : Threads)
T.join();
Allocator->releaseToOS();
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

// Test that multiple instantiations of the allocator have not messed up the
Expand Down Expand Up @@ -601,7 +601,7 @@ TEST(ScudoCombinedTest, FullRegion) {
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
auto *Allocator = this->Allocator.get();
Allocator->releaseToOS();
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
Expand Down Expand Up @@ -740,7 +740,7 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
TSD->getCache().drain();

Allocator->releaseToOS();
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

#endif
Expand Down
10 changes: 5 additions & 5 deletions compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
Expand Up @@ -161,7 +161,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
Cache.deallocate(ClassId, Pointers[J]);
}
Cache.destroy(nullptr);
Allocator->releaseToOS();
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
scudo::ScopedString Str;
Allocator->getStats(&Str);
Str.output();
Expand Down Expand Up @@ -215,7 +215,7 @@ TEST(ScudoPrimaryTest, Primary64OOM) {
Cache.deallocate(Primary::SizeClassMap::BatchClassId, B);
}
Cache.destroy(nullptr);
Allocator.releaseToOS();
Allocator.releaseToOS(scudo::ReleaseToOS::Force);
scudo::ScopedString Str;
Allocator.getStats(&Str);
Str.output();
Expand Down Expand Up @@ -253,7 +253,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
V.pop_back();
}
Cache.destroy(nullptr);
Allocator->releaseToOS();
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
scudo::ScopedString Str;
Allocator->getStats(&Str);
Str.output();
Expand Down Expand Up @@ -300,7 +300,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
}
for (auto &T : Threads)
T.join();
Allocator->releaseToOS();
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
scudo::ScopedString Str;
Allocator->getStats(&Str);
Str.output();
Expand All @@ -322,7 +322,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, ReleaseToOS) {
EXPECT_NE(P, nullptr);
Cache.deallocate(ClassId, P);
Cache.destroy(nullptr);
EXPECT_GT(Allocator->releaseToOS(), 0U);
EXPECT_GT(Allocator->releaseToOS(scudo::ReleaseToOS::Force), 0U);
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {
Expand Down
5 changes: 4 additions & 1 deletion compiler-rt/lib/scudo/standalone/wrappers_c.inc
Expand Up @@ -188,7 +188,10 @@ INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
static_cast<scudo::sptr>(value));
return 1;
} else if (param == M_PURGE) {
SCUDO_ALLOCATOR.releaseToOS();
SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
return 1;
} else if (param == M_PURGE_ALL) {
SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::ForceAll);
return 1;
} else {
scudo::Option option;
Expand Down

0 comments on commit 8aaefa9

Please sign in to comment.