[scudo] Move the blocks marking logic into markFreeBlocks()
This change is only in SizeClassAllocator32; SizeClassAllocator64 already has it implemented.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D158455
ChiaHungDuan committed Aug 24, 2023
1 parent 56241b6 commit fe0cb7b
Showing 1 changed file with 98 additions and 80 deletions.
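
To orient the reader before the diff: after this change, releaseToOSMaybe() reduces to three numbered steps, with the marking loop moved into the new markFreeBlocks(). Below is a minimal compilable sketch of that shape; the stub types and helpers (worthReleasing, the simplified markFreeBlocks and releaseFreeMemoryToOS signatures) are placeholders rather than scudo's real API, which takes more parameters and holds Sci->Mutex throughout.

#include <cstdint>

using uptr = uintptr_t;

// Hypothetical stand-ins for the real scudo types; the actual definitions
// live in release.h and primary32.h.
enum class ReleaseToOS { Normal, ForceAll };

struct PageReleaseContext {
  bool Marked = false;
  bool hasBlockMarked() const { return Marked; }
};

struct ReleaseRecorder {
  uptr ReleasedBytes = 0;
  uptr getReleasedBytes() const { return ReleasedBytes; }
};

struct SizeClassInfo { /* free-list bookkeeping elided */ };

// Placeholder helpers; in the real code these are the heuristics in
// releaseToOSMaybe(), the new markFreeBlocks(), and releaseFreeMemoryToOS().
static bool worthReleasing(SizeClassInfo *, ReleaseToOS) { return true; }
static PageReleaseContext markFreeBlocks(SizeClassInfo *, uptr /*ClassId*/) {
  return PageReleaseContext{/*Marked=*/true};
}
static void releaseFreeMemoryToOS(PageReleaseContext &, ReleaseRecorder &) {}

uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                      ReleaseToOS ReleaseType) {
  // 1. Check if it is worth doing a page release.
  if (ReleaseType != ReleaseToOS::ForceAll &&
      !worthReleasing(Sci, ReleaseType))
    return 0;
  // 2. Mark the free blocks; the marking loop now lives in markFreeBlocks().
  PageReleaseContext Context = markFreeBlocks(Sci, ClassId);
  if (!Context.hasBlockMarked())
    return 0;
  // 3. Release the unused physical pages back to the OS.
  ReleaseRecorder Recorder;
  releaseFreeMemoryToOS(Context, Recorder);
  return Recorder.getReleasedBytes();
}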
178 changes: 98 additions & 80 deletions compiler-rt/lib/scudo/standalone/primary32.h
@@ -866,7 +866,6 @@ template <typename Config> class SizeClassAllocator32 {
ReleaseToOS ReleaseType = ReleaseToOS::Normal)
REQUIRES(Sci->Mutex) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();

DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
const uptr BytesInFreeList =
@@ -878,7 +877,7 @@ template <typename Config> class SizeClassAllocator32 {
return 0;

// ====================================================================== //
// Check if we have enough free blocks and if it's worth doing a page
// 1. Check if we have enough free blocks and if it's worth doing a page
// release.
// ====================================================================== //
if (ReleaseType != ReleaseToOS::ForceAll &&
@@ -894,88 +893,20 @@ template <typename Config> class SizeClassAllocator32 {
uptr TotalReleasedBytes = 0;
const uptr Base = First * RegionSize;
const uptr NumberOfRegions = Last - First + 1U;
const uptr GroupSize = (1UL << GroupSizeLog);
const uptr CurGroupBase =
compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));

ReleaseRecorder Recorder(Base);
PageReleaseContext Context(BlockSize, NumberOfRegions,
/*ReleaseSize=*/RegionSize);

auto DecompactPtr = [](CompactPtrT CompactPtr) {
return reinterpret_cast<uptr>(CompactPtr);
};
for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
// `GroupSize` may not be divisible by `BlockSize`, which means there may be
// unused space at the end of the region. Exclude that space to avoid an
// unused page map entry.
uptr AllocatedGroupSize = GroupBase == CurGroupBase
? Sci->CurrentRegionAllocated
: roundDownSlow(GroupSize, BlockSize);
if (AllocatedGroupSize == 0)
continue;

// TransferBatches are pushed to the front of BG.Batches; the first one may
// not be fully used.
const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
BG.Batches.front()->getCount();
const uptr BytesInBG = NumBlocks * BlockSize;

if (ReleaseType != ReleaseToOS::ForceAll) {
if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
BG.BytesInBGAtLastCheckpoint = BytesInBG;
continue;
}

const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
if (PushedBytesDelta < PageSize)
continue;

// Given the randomness property, we try to release the pages only if the
// bytes used by free blocks exceed a certain proportion of the allocated
// space.
if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
(100U - 1U - BlockSize / 16U)) {
continue;
}
}

// TODO: Consider updating this after page release if `ReleaseRecorder`
// can tell the number of released bytes in each group.
BG.BytesInBGAtLastCheckpoint = BytesInBG;

const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
const uptr RegionIndex = (GroupBase - Base) / RegionSize;

if (NumBlocks == MaxContainedBlocks) {
for (const auto &It : BG.Batches)
for (u16 I = 0; I < It.getCount(); ++I)
DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);

const uptr To = GroupBase + AllocatedGroupSize;
Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
AllocatedGroupSize);
} else {
DCHECK_LT(NumBlocks, MaxContainedBlocks);

// Note that we don't always visit blocks in each BatchGroup, so we may
// miss the chance of releasing certain pages that cross BatchGroups.
Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
RegionIndex, AllocatedGroupSize,
/*MayContainLastBlockInRegion=*/true);
}

// In a rare case, the PageMap allocation may fail and we would not be
// able to do the page release.
if (UNLIKELY(!Context.hasBlockMarked()))
return 0;
}

// ==================================================================== //
// 2. Mark the free blocks and we can tell which pages are in-use by
// querying `PageReleaseContext`.
// ==================================================================== //
PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
NumberOfRegions, ReleaseType);
if (!Context.hasBlockMarked())
return 0;

// ==================================================================== //
// 3. Release the unused physical pages back to the OS.
// ==================================================================== //
ReleaseRecorder Recorder(Base);
auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
ScopedLock L(ByteMapMutex);
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
@@ -1054,6 +985,93 @@ template <typename Config> class SizeClassAllocator32 {
return true;
}

PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
const uptr BlockSize, const uptr Base,
const uptr NumberOfRegions,
ReleaseToOS ReleaseType)
REQUIRES(Sci->Mutex) {
const uptr PageSize = getPageSizeCached();
const uptr GroupSize = (1UL << GroupSizeLog);
const uptr CurGroupBase =
compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));

PageReleaseContext Context(BlockSize, NumberOfRegions,
/*ReleaseSize=*/RegionSize);

auto DecompactPtr = [](CompactPtrT CompactPtr) {
return reinterpret_cast<uptr>(CompactPtr);
};
for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
// `GroupSize` may not be divisible by `BlockSize`, which means there may be
// unused space at the end of the region. Exclude that space to avoid an
// unused page map entry.
uptr AllocatedGroupSize = GroupBase == CurGroupBase
? Sci->CurrentRegionAllocated
: roundDownSlow(GroupSize, BlockSize);
if (AllocatedGroupSize == 0)
continue;

// TransferBatches are pushed to the front of BG.Batches; the first one may
// not be fully used.
const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
BG.Batches.front()->getCount();
const uptr BytesInBG = NumBlocks * BlockSize;

if (ReleaseType != ReleaseToOS::ForceAll) {
if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
BG.BytesInBGAtLastCheckpoint = BytesInBG;
continue;
}

const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
if (PushedBytesDelta < PageSize)
continue;

// Given the randomness property, we try to release the pages only if the
// bytes used by free blocks exceed a certain proportion of the allocated
// space.
if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
(100U - 1U - BlockSize / 16U)) {
continue;
}
}

// TODO: Consider updating this after page release if `ReleaseRecorder`
// can tell the number of released bytes in each group.
BG.BytesInBGAtLastCheckpoint = BytesInBG;

const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
const uptr RegionIndex = (GroupBase - Base) / RegionSize;

if (NumBlocks == MaxContainedBlocks) {
for (const auto &It : BG.Batches)
for (u16 I = 0; I < It.getCount(); ++I)
DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);

const uptr To = GroupBase + AllocatedGroupSize;
Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
AllocatedGroupSize);
} else {
DCHECK_LT(NumBlocks, MaxContainedBlocks);

// Note that we don't always visit blocks in each BatchGroup, so we may
// miss the chance of releasing certain pages that cross BatchGroups.
Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
RegionIndex, AllocatedGroupSize,
/*MayContainLastBlockInRegion=*/true);
}

// In a rare case, the PageMap allocation may fail and we would not be
// able to do the page release.
if (UNLIKELY(!Context.hasBlockMarked()))
break;
}

return Context;
}

SizeClassInfo SizeClassInfoArray[NumClasses] = {};

HybridMutex ByteMapMutex;
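Two computations in the marking loop above are easier to follow with concrete numbers: NumBlocks relies on the invariant that only the front TransferBatch may be partially filled, and AllocatedGroupSize trims the tail of a group that BlockSize does not evenly divide. A minimal standalone sketch, with all values hypothetical rather than taken from scudo's size-class configuration:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;
using u16 = uint16_t;

int main() {
  // Illustrative values only; not from a real scudo size-class config.
  const uptr NumBatches = 3;        // BG.Batches.size()
  const u16 MaxCachedPerBatch = 13; // capacity of one TransferBatch
  const u16 FrontCount = 5;         // BG.Batches.front()->getCount()

  // Only the front batch may be partially filled; the rest are full.
  const uptr NumBlocks = (NumBatches - 1) * MaxCachedPerBatch + FrontCount;
  assert(NumBlocks == 31); // 2 * 13 + 5

  const uptr BlockSize = 96;
  const uptr BytesInBG = NumBlocks * BlockSize;
  assert(BytesInBG == 2976);

  // `GroupSize` may not be divisible by `BlockSize`; the roundDownSlow()
  // call trims the unusable tail of the group.
  const uptr GroupSize = 1UL << 18; // 256 KiB
  const uptr AllocatedGroupSize = GroupSize - GroupSize % BlockSize;
  assert(AllocatedGroupSize == 262080); // 262144 - 64

  return 0;
}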

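The small-block heuristic in markFreeBlocks() skips a BatchGroup unless the bytes in free blocks reach at least 100 - 1 - BlockSize / 16 percent of the allocated group size. A sketch of that arithmetic with illustrative numbers; worthReleasingSmallBlockGroup is a name invented here, not part of scudo:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

// Mirrors the small-block check in markFreeBlocks(); the constants come
// from the diff above, the sample values below are hypothetical.
bool worthReleasingSmallBlockGroup(uptr BlockSize, uptr BytesInBG,
                                   uptr AllocatedGroupSize) {
  const uptr ThresholdPercent = 100U - 1U - BlockSize / 16U;
  return (BytesInBG * 100U) / AllocatedGroupSize >= ThresholdPercent;
}

int main() {
  // BlockSize = 32 -> threshold = 100 - 1 - 2 = 97 percent.
  // A group with 96% of its space in free blocks is skipped...
  assert(!worthReleasingSmallBlockGroup(32, 96 * 1024, 100 * 1024));
  // ...while one with 98% free is considered for release.
  assert(worthReleasingSmallBlockGroup(32, 98 * 1024, 100 * 1024));
  return 0;
}

With BlockSize = 32 the threshold works out to 97 percent, so for small blocks only groups that are almost entirely free justify the page-release work.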