Commit
Revert "[scudo] Support partial page releasing"
This reverts commit 9c26f51.
kamaub committed Oct 14, 2022
1 parent 5fe6f3e commit 1cf1b36
Showing 4 changed files with 19 additions and 91 deletions.
7 changes: 0 additions & 7 deletions compiler-rt/lib/scudo/standalone/local_cache.h
@@ -65,13 +65,6 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
uptr GroupId;
// Cache value of TransferBatch::getMaxCached()
u16 MaxCachedPerBatch;
// Number of blocks pushed into this group. This is an increment-only
// counter.
uptr PushedBlocks;
// This is used to track how many blocks are pushed since last time we
// checked `PushedBlocks`. It's useful for page releasing to determine the
// usage of a BatchGroup.
uptr PushedBlocksAtLastCheckpoint;
// Blocks are managed by TransferBatch in a list.
SinglyLinkedList<TransferBatch> Batches;
};
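
These two counters existed solely to drive the partial-release heuristic being reverted. A minimal sketch of how the deleted release loops (shown below in primary32.h and primary64.h) consumed them, with a hypothetical helper name:

#include <cstdint>

// Sketch of the reverted checkpoint gate: a BatchGroup was only scanned for
// release once at least a page's worth of blocks had been pushed since the
// previous checkpoint; the loop then advanced the checkpoint.
bool pushedEnoughSinceCheckpoint(uint64_t PushedBlocks,
                                 uint64_t PushedBlocksAtLastCheckpoint,
                                 uint64_t BlockSize, uint64_t PageSize) {
  const uint64_t PushedBytesDelta =
      (PushedBlocks - PushedBlocksAtLastCheckpoint) * BlockSize;
  return PushedBytesDelta >= PageSize;
}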
51 changes: 10 additions & 41 deletions compiler-rt/lib/scudo/standalone/primary32.h
@@ -421,8 +421,6 @@ template <typename Config> class SizeClassAllocator32 {

BG->GroupId = GroupId;
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->PushedBlocksAtLastCheckpoint = 0;
BG->MaxCachedPerBatch =
TransferBatch::getMaxCached(getSizeByClassId(ClassId));

@@ -448,8 +446,6 @@ template <typename Config> class SizeClassAllocator32 {
CurBatch->appendFromArray(&Array[I], AppendSize);
I += AppendSize;
}

BG->PushedBlocks += Size;
};

BatchGroup *Cur = Sci->FreeList.front();
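
For context, the append loop above fills the current TransferBatch and moves on to the next. A standalone sketch of that flow (container types are illustrative; scudo uses intrusive TransferBatch lists, not std::vector):

#include <algorithm>
#include <cstdint>
#include <vector>

// Copy blocks from Array into batches, starting a fresh batch whenever the
// current one reaches MaxCachedPerBatch blocks.
void pushToBatches(std::vector<std::vector<uint64_t>> &Batches,
                   const uint64_t *Array, uint32_t Size,
                   uint32_t MaxCachedPerBatch) {
  for (uint32_t I = 0; I < Size;) {
    if (Batches.empty() || Batches.back().size() >= MaxCachedPerBatch)
      Batches.emplace_back();
    std::vector<uint64_t> &Cur = Batches.back();
    const uint32_t AppendSize = std::min<uint32_t>(
        MaxCachedPerBatch - static_cast<uint32_t>(Cur.size()), Size - I);
    Cur.insert(Cur.end(), Array + I, Array + I + AppendSize);
    I += AppendSize;
  }
}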
@@ -656,13 +652,16 @@ template <typename Config> class SizeClassAllocator32 {
if (BytesPushed < PageSize)
return 0; // Nothing new to release.

const bool CheckDensity = BlockSize < PageSize / 16U;
// Releasing smaller blocks is expensive, so we want to make sure that a
// significant amount of bytes are free, and that there has been a good
// amount of batches pushed to the freelist before attempting to release.
if (CheckDensity) {
if (BlockSize < PageSize / 16U) {
if (!Force && BytesPushed < Sci->AllocatedUser / 16U)
return 0;
// We want 8x% to 9x% free bytes (the larger the block, the lower the %).
if ((BytesInFreeList * 100U) / Sci->AllocatedUser <
(100U - 1U - BlockSize / 16U))
return 0;
}

if (!Force) {
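
In this hunk the revert replaces the deleted CheckDensity flag and its `if (CheckDensity)` guard with the restored inline `if (BlockSize < PageSize / 16U)` block and density test. For intuition, a standalone sketch of the restored check (the 4 KiB PageSize is an assumption; scudo reads it via getPageSizeCached()):

#include <cstdint>

// BlockSize 16 needs >= 98% of AllocatedUser free, 64 -> 95%, 128 -> 91%,
// 240 -> 84%; blocks of PageSize / 16 (here 256 bytes) and up skip the check.
bool passesDensityCheck(uint64_t BlockSize, uint64_t BytesInFreeList,
                        uint64_t AllocatedUser) {
  const uint64_t PageSize = 4096;
  if (BlockSize >= PageSize / 16U)
    return true;
  const uint64_t RequiredPercentage = 100U - 1U - BlockSize / 16U;
  return (BytesInFreeList * 100U) / AllocatedUser >= RequiredPercentage;
}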
@@ -683,47 +682,17 @@ template <typename Config> class SizeClassAllocator32 {
uptr TotalReleasedBytes = 0;
const uptr Base = First * RegionSize;
const uptr NumberOfRegions = Last - First + 1U;
const uptr GroupSize = (1U << GroupSizeLog);
const uptr CurRegionGroupId =
compactPtrGroup(compactPtr(ClassId, Sci->CurrentRegion));

ReleaseRecorder Recorder(Base);
PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);

auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
};
auto DecompactPtr = [](CompactPtrT CompactPtr) {
return reinterpret_cast<uptr>(CompactPtr);
};
for (BatchGroup &BG : Sci->FreeList) {
const uptr PushedBytesDelta =
BG.PushedBlocks - BG.PushedBlocksAtLastCheckpoint;
if (PushedBytesDelta * BlockSize < PageSize)
continue;

uptr AllocatedGroupSize =
BG.GroupId == CurRegionGroupId ? Sci->CurrentRegionAllocated :
GroupSize;
if (AllocatedGroupSize == 0) continue;

const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
BG.Batches.back()->getCount();
const uptr BytesInBG = NumBlocks * BlockSize;
// Given the randomness property, we try to release the pages only if the
// bytes used by free blocks exceed certain proportion of allocated
// spaces.
if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
(100U - 1U - BlockSize / 16U)) {
continue;
}

BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
// Note that we don't always visit blocks in each BatchGroup so that we
// may miss the chance of releasing certain pages that cross BatchGroups.
PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
for (BatchGroup &BG : Sci->FreeList)
Context.markFreeBlocks(BG.Batches, DecompactPtr, Base);
}

auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
};
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);

if (Recorder.getReleasedRangesCount() > 0) {
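The deleted loop above released pages one BatchGroup at a time, gated on pushed-byte churn and per-group density; the restored code simply marks every free block in the freelist and lets releaseFreeMemoryToOS() do the rest (SkipRegion filters out regions not owned by this class; PossibleRegions appears to store ClassId + 1, with 0 meaning unmapped, hence the - 1U). The deleted density gate estimated a group's free bytes by assuming every batch but the last is full; a sketch with a hypothetical helper:

#include <cstdint>

// Per-group free-byte estimate from the deleted loop: all batches except the
// last are assumed to hold MaxCachedPerBatch blocks.
uint64_t bytesInBatchGroup(uint64_t NumBatches, uint64_t MaxCachedPerBatch,
                           uint64_t LastBatchCount, uint64_t BlockSize) {
  const uint64_t NumBlocks =
      (NumBatches - 1) * MaxCachedPerBatch + LastBatchCount;
  // e.g. 5 batches of up to 13 blocks, the last holding 7:
  // (5 - 1) * 13 + 7 = 59 blocks.
  return NumBlocks * BlockSize;
}
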
50 changes: 8 additions & 42 deletions compiler-rt/lib/scudo/standalone/primary64.h
@@ -370,9 +370,6 @@ template <typename Config> class SizeClassAllocator64 {
static uptr compactPtrGroup(CompactPtrT CompactPtr) {
return CompactPtr >> (GroupSizeLog - CompactPtrScale);
}
static uptr batchGroupBase(uptr Base, uptr GroupId) {
return (GroupId << GroupSizeLog) + Base;
}
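
compactPtrGroup() buckets a compact pointer into a 2^GroupSizeLog-byte group; batchGroupBase(), deleted by this revert, mapped a group id back to the group's base address. A worked instance with assumed parameter values:

#include <cstdint>

// GroupSizeLog and CompactPtrScale are config-dependent; these values are
// assumptions for illustration only.
constexpr uint64_t GroupSizeLog = 18;   // 256 KiB groups.
constexpr uint64_t CompactPtrScale = 4; // Compact pointers drop 4 low bits.

uint64_t compactPtrGroup(uint64_t CompactPtr) {
  return CompactPtr >> (GroupSizeLog - CompactPtrScale);
}
uint64_t batchGroupBase(uint64_t Base, uint64_t GroupId) {
  return (GroupId << GroupSizeLog) + Base;
}
// Compact pointer 0x40000 stands for raw offset 0x400000 (4 MiB), so
// compactPtrGroup(0x40000) == 16 (4 MiB / 256 KiB), and
// batchGroupBase(0, 16) == 16 << 18 == 0x400000 recovers the group base.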

// Push the blocks to their batch group. The layout will be like,
//
@@ -427,8 +424,6 @@ template <typename Config> class SizeClassAllocator64 {

BG->GroupId = GroupId;
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->PushedBlocksAtLastCheckpoint = 0;
BG->MaxCachedPerBatch =
TransferBatch::getMaxCached(getSizeByClassId(ClassId));

@@ -455,8 +450,6 @@ template <typename Config> class SizeClassAllocator64 {
CurBatch->appendFromArray(&Array[I], AppendSize);
I += AppendSize;
}

BG->PushedBlocks += Size;
};

BatchGroup *Cur = Region->FreeList.front();
@@ -668,13 +661,16 @@ template <typename Config> class SizeClassAllocator64 {
if (BytesPushed < PageSize)
return 0; // Nothing new to release.

bool CheckDensity = BlockSize < PageSize / 16U;
// Releasing smaller blocks is expensive, so we want to make sure that a
// significant amount of bytes are free, and that there has been a good
// amount of batches pushed to the freelist before attempting to release.
if (CheckDensity) {
if (BlockSize < PageSize / 16U) {
if (!Force && BytesPushed < Region->AllocatedUser / 16U)
return 0;
// We want 8x% to 9x% free bytes (the larger the block, the lower the %).
if ((BytesInFreeList * 100U) / Region->AllocatedUser <
(100U - 1U - BlockSize / 16U))
return 0;
}

if (!Force) {
@@ -688,46 +684,16 @@ template <typename Config> class SizeClassAllocator64 {
}
}

const uptr GroupSize = (1U << GroupSizeLog);
const uptr AllocatedUserEnd = Region->AllocatedUser + Region->RegionBeg;
ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
PageReleaseContext Context(BlockSize, RegionSize, /*NumberOfRegions=*/1U);

const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
return decompactPtrInternal(CompactPtrBase, CompactPtr);
};
for (BatchGroup &BG : Region->FreeList) {
const uptr PushedBytesDelta =
BG.PushedBlocks - BG.PushedBlocksAtLastCheckpoint;
if (PushedBytesDelta * BlockSize < PageSize)
continue;
const uptr BatchGroupEnd =
batchGroupBase(BG.GroupId, CompactPtrBase) + GroupSize;
const uptr AllocatedGroupSize =
AllocatedUserEnd >= BatchGroupEnd ? GroupSize :
AllocatedUserEnd - BatchGroupEnd;
if (AllocatedGroupSize == 0) continue;

const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
BG.Batches.back()->getCount();
const uptr BytesInBG = NumBlocks * BlockSize;
// Given the randomness property, we try to release the pages only if the
// bytes used by free blocks exceed certain proportion of group size. Note
// that this heuristic only applies when all the spaces in a BatchGroup
// are allocated.
if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
(100U - 1U - BlockSize / 16U)) {
continue;
}

BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
// Note that we don't always visit blocks in each BatchGroup so that we
// may miss the chance of releasing certain pages that cross BatchGroups.
auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
PageReleaseContext Context(BlockSize, RegionSize, /*NumberOfRegions=*/1U);
for (BatchGroup &BG : Region->FreeList)
Context.markFreeBlocks(BG.Batches, DecompactPtr, Region->RegionBeg);
}

auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);

if (Recorder.getReleasedRangesCount() > 0) {
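Unlike primary32, the deleted primary64 loop also scaled its density check by how much of each group is actually allocated: groups below the allocation frontier count as a full GroupSize, while the frontier group only counts its allocated prefix. A sketch of that quantity as I read the deleted code (hypothetical helper, not scudo's):

#include <cstdint>

// AllocatedGroupSize idea from the deleted loop: cap the frontier group at
// AllocatedUserEnd; untouched groups contribute nothing and are skipped.
uint64_t allocatedGroupSize(uint64_t GroupBase, uint64_t GroupSize,
                            uint64_t AllocatedUserEnd) {
  const uint64_t GroupEnd = GroupBase + GroupSize;
  if (AllocatedUserEnd >= GroupEnd)
    return GroupSize; // Fully allocated group.
  if (AllocatedUserEnd <= GroupBase)
    return 0; // Group not carved out yet.
  return AllocatedUserEnd - GroupBase; // Partially allocated frontier group.
}
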
2 changes: 1 addition & 1 deletion compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -506,7 +506,7 @@ struct DeathSizeClassConfig {
static const scudo::uptr MinSizeLog = 10;
static const scudo::uptr MidSizeLog = 10;
static const scudo::uptr MaxSizeLog = 13;
static const scudo::u16 MaxNumCachedHint = 8;
static const scudo::u16 MaxNumCachedHint = 4;
static const scudo::uptr MaxBytesCachedLog = 12;
static const scudo::uptr SizeDelta = 0;
};
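
MaxNumCachedHint feeds TransferBatch::getMaxCached(), seen in the primary hunks above. A hedged sketch of how a size-class map typically combines the hint with MaxBytesCachedLog — not scudo's exact implementation:

#include <algorithm>
#include <cstdint>

// Cap cached blocks per size class by both the hint and a byte budget.
uint16_t maxCachedHint(uint64_t Size, uint16_t MaxNumCachedHint,
                       uint64_t MaxBytesCachedLog) {
  const uint64_t ByBytes = (uint64_t{1} << MaxBytesCachedLog) / Size;
  return static_cast<uint16_t>(
      std::max<uint64_t>(1, std::min<uint64_t>(MaxNumCachedHint, ByBytes)));
}
// With this test config (MaxBytesCachedLog = 12, MaxNumCachedHint = 4), a
// 1024-byte class caches min(4, 4096 / 1024) = 4 blocks per batch; the
// revert restores the hint from 8 back to 4.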
