[scudo] Group poppedBlocks/pushedBlocks into BlocksInfo (NFC)

Create a new BlocksInfo to contain a list of blocks, poppedBlocks, and
pushedBlocks. This is preparation for adding a new lock for operations
on the freelist.

Differential Revision: https://reviews.llvm.org/D149143
ChiaHungDuan committed Jun 15, 2023
1 parent 24563b8 commit 5beaa73
Showing 2 changed files with 100 additions and 86 deletions.
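
For orientation, here is a minimal, self-contained sketch of the shape this change gives SizeClassInfo, using simplified stand-in types rather than the scudo headers: the freelist and its popped/pushed counters now travel together in one BlocksInfo member. The dedicated freelist lock mentioned in the commit message is not part of this commit, so the lock named in the comments below is only an assumption about the follow-up.

// Minimal sketch with illustrative stand-in types (not the scudo sources).
#include <cstdint>
#include <mutex>

struct BatchGroup; // stand-in for scudo's BatchGroup

// Stand-in for scudo's intrusive SinglyLinkedList<BatchGroup>.
struct BatchGroupList {
  BatchGroup *Head = nullptr;
};

// New in this commit: the free-block list and its counters live together.
struct BlocksInfo {
  BatchGroupList BlockList = {};
  std::uint64_t PoppedBlocks = 0;
  std::uint64_t PushedBlocks = 0;
};

struct SizeClassInfo {
  std::mutex Mutex; // still guards FreeListInfo in this commit
  // Hypothetical follow-up (assumed, not in this commit): a dedicated
  // freelist lock, e.g. `std::mutex FLLock;`, guarding only FreeListInfo so
  // freelist pushes/pops stop contending with the rest of the class state.
  BlocksInfo FreeListInfo;
};

int main() {
  SizeClassInfo Sci;
  std::lock_guard<std::mutex> Guard(Sci.Mutex);
  Sci.FreeListInfo.PoppedBlocks += 4; // e.g. a batch of 4 blocks handed out
  Sci.FreeListInfo.PushedBlocks += 4; // ...and later returned
  return 0;
}
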
72 changes: 38 additions & 34 deletions compiler-rt/lib/scudo/standalone/primary32.h
@@ -154,7 +154,7 @@ template <typename Config> class SizeClassAllocator32 {
// if `populateFreeList` succeeded, we are supposed to get free blocks.
DCHECK_NE(B, nullptr);
}
- Sci->Stats.PoppedBlocks += B->getCount();
+ Sci->FreeListInfo.PoppedBlocks += B->getCount();
return B;
}

@@ -175,7 +175,7 @@ template <typename Config> class SizeClassAllocator32 {
if (Size == 1 && !populateFreeList(C, ClassId, Sci))
return;
pushBlocksImpl(C, ClassId, Sci, Array, Size);
- Sci->Stats.PushedBlocks += Size;
+ Sci->FreeListInfo.PushedBlocks += Size;
return;
}

@@ -201,7 +201,7 @@ template <typename Config> class SizeClassAllocator32 {
ScopedLock L(Sci->Mutex);
pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);

- Sci->Stats.PushedBlocks += Size;
+ Sci->FreeListInfo.PushedBlocks += Size;
if (ClassId != SizeClassMap::BatchClassId)
releaseToOSMaybe(Sci, ClassId);
}
@@ -267,8 +267,8 @@ template <typename Config> class SizeClassAllocator32 {
SizeClassInfo *Sci = getSizeClassInfo(I);
ScopedLock L(Sci->Mutex);
TotalMapped += Sci->AllocatedUser;
- PoppedBlocks += Sci->Stats.PoppedBlocks;
- PushedBlocks += Sci->Stats.PushedBlocks;
+ PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
+ PushedBlocks += Sci->FreeListInfo.PushedBlocks;
}
Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
"remains %zu\n",
@@ -322,24 +322,24 @@ template <typename Config> class SizeClassAllocator32 {
static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
typedef FlatByteMap<NumRegions> ByteMap;

- struct SizeClassStats {
-   uptr PoppedBlocks;
-   uptr PushedBlocks;
- };

struct ReleaseToOsInfo {
uptr BytesInFreeListAtLastCheckpoint;
uptr RangesReleased;
uptr LastReleasedBytes;
u64 LastReleaseAtNs;
};

+ struct BlocksInfo {
+   SinglyLinkedList<BatchGroup> BlockList = {};
+   uptr PoppedBlocks = 0;
+   uptr PushedBlocks = 0;
+ };

struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
HybridMutex Mutex;
- SinglyLinkedList<BatchGroup> FreeList GUARDED_BY(Mutex);
+ BlocksInfo FreeListInfo GUARDED_BY(Mutex);
uptr CurrentRegion GUARDED_BY(Mutex);
uptr CurrentRegionAllocated GUARDED_BY(Mutex);
- SizeClassStats Stats GUARDED_BY(Mutex);
u32 RandState;
uptr AllocatedUser GUARDED_BY(Mutex);
// Lowest & highest region index allocated for this size class, to avoid
@@ -416,13 +416,13 @@ template <typename Config> class SizeClassAllocator32 {

// Push the blocks to their batch group. The layout will be like,
//
- // FreeList - > BG -> BG -> BG
- //              |     |     |
- //              v     v     v
- //              TB    TB    TB
- //              |
- //              v
- //              TB
+ // FreeListInfo.BlockList - > BG -> BG -> BG
+ //                            |     |     |
+ //                            v     v     v
+ //                            TB    TB    TB
+ //                            |
+ //                            v
+ //                            TB
//
// Each BlockGroup(BG) will associate with unique group id and the free blocks
// are managed by a list of TransferBatch(TB). To reduce the time of inserting
@@ -533,13 +533,13 @@ template <typename Config> class SizeClassAllocator32 {
BG->PushedBlocks += Size;
};

- BatchGroup *Cur = Sci->FreeList.front();
+ BatchGroup *Cur = Sci->FreeListInfo.BlockList.front();

if (ClassId == SizeClassMap::BatchClassId) {
if (Cur == nullptr) {
// Don't need to classify BatchClassId.
Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
- Sci->FreeList.push_front(Cur);
+ Sci->FreeListInfo.BlockList.push_front(Cur);
}
InsertBlocks(Cur, Array, Size);
return;
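
A few hunks above, the updated comment describes the layout that FreeListInfo.BlockList maintains: a singly linked list of BatchGroups (BG), each owning a chain of TransferBatches (TB) that hold the free blocks of one group. A stripped-down sketch of that shape, with illustrative stand-in types and field sizes that are assumptions rather than the real scudo classes, is:

// Illustrative stand-in types only; field layout and sizes are assumptions.
#include <cstdint>

using uptr = std::uintptr_t;

struct TransferBatch {
  TransferBatch *Next = nullptr; // next TB in the same group
  uptr Blocks[32];               // compact pointers to free blocks
  std::uint32_t Count = 0;       // number of valid entries in Blocks
};

struct BatchGroup {
  BatchGroup *Next = nullptr;       // next BG in FreeListInfo.BlockList
  uptr CompactPtrGroupBase = 0;     // group id covered by this BG
  TransferBatch *Batches = nullptr; // TB -> TB -> ... for this group
  uptr PushedBlocks = 0;            // blocks pushed into this group
};

// FreeListInfo.BlockList is the head of the BG chain, kept sorted by
// CompactPtrGroupBase so pushBlocksImpl can insert with an early stop.
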
@@ -559,9 +559,9 @@ template <typename Config> class SizeClassAllocator32 {
compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
Cur = CreateGroup(compactPtrGroupBase(Array[0]));
if (Prev == nullptr)
- Sci->FreeList.push_front(Cur);
+ Sci->FreeListInfo.BlockList.push_front(Cur);
else
- Sci->FreeList.insert(Prev, Cur);
+ Sci->FreeListInfo.BlockList.insert(Prev, Cur);
}

// All the blocks are from the same group, just push without checking group
@@ -592,7 +592,7 @@ template <typename Config> class SizeClassAllocator32 {
compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
Cur = CreateGroup(compactPtrGroupBase(Array[I]));
DCHECK_NE(Prev, nullptr);
- Sci->FreeList.insert(Prev, Cur);
+ Sci->FreeListInfo.BlockList.insert(Prev, Cur);
}

Count = 1;
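
The loop in this hunk walks the sorted Array and starts a new BatchGroup whenever the compact-pointer group base changes. The fragment below sketches just that partitioning step in isolation; GroupSizeLog, compactPtrGroupBase, and the sample values are illustrative assumptions, not taken from the source.

// Partition a sorted array of compact pointers into runs that share a group
// base. GroupSizeLog is an assumed constant (scudo derives it from Config).
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

using uptr = std::uintptr_t;
constexpr uptr GroupSizeLog = 18; // illustrative: 256 KiB groups

static uptr compactPtrGroupBase(uptr CompactPtr) {
  return CompactPtr & ~((uptr(1) << GroupSizeLog) - 1);
}

int main() {
  const std::vector<uptr> Array = {0x10000, 0x10040, 0x50000, 0x50040, 0x50080};
  uptr CurBase = compactPtrGroupBase(Array[0]);
  std::size_t Count = 1;
  for (std::size_t I = 1; I < Array.size(); ++I) {
    if (compactPtrGroupBase(Array[I]) != CurBase) {
      std::printf("group 0x%zx: %zu blocks\n",
                  static_cast<std::size_t>(CurBase), Count);
      CurBase = compactPtrGroupBase(Array[I]);
      Count = 0;
    }
    ++Count;
  }
  std::printf("group 0x%zx: %zu blocks\n",
              static_cast<std::size_t>(CurBase), Count);
  return 0;
}

With these sample values the output is two runs: the first group gets 2 blocks and the second gets 3, mirroring how pushBlocksImpl creates a fresh BG each time the group base changes.
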
@@ -610,10 +610,11 @@ template <typename Config> class SizeClassAllocator32 {
// The region mutex needs to be held while calling this method.
TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
REQUIRES(Sci->Mutex) {
- if (Sci->FreeList.empty())
+ if (Sci->FreeListInfo.BlockList.empty())
return nullptr;

- SinglyLinkedList<TransferBatch> &Batches = Sci->FreeList.front()->Batches;
+ SinglyLinkedList<TransferBatch> &Batches =
+     Sci->FreeListInfo.BlockList.front()->Batches;
DCHECK(!Batches.empty());

TransferBatch *B = Batches.front();
@@ -622,8 +623,8 @@ template <typename Config> class SizeClassAllocator32 {
DCHECK_GT(B->getCount(), 0U);

if (Batches.empty()) {
- BatchGroup *BG = Sci->FreeList.front();
- Sci->FreeList.pop_front();
+ BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+ Sci->FreeListInfo.BlockList.pop_front();

// We don't keep BatchGroup with zero blocks to avoid empty-checking while
// allocating. Note that block used by constructing BatchGroup is recorded
@@ -728,13 +729,15 @@ template <typename Config> class SizeClassAllocator32 {
REQUIRES(Sci->Mutex) {
if (Sci->AllocatedUser == 0)
return;
- const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
+ const uptr InUse =
+     Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
Str->append(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
"inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
- Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
- AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
+ Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
+ InUse, AvailableChunks, Rss >> 10,
+ Sci->ReleaseInfo.RangesReleased);
}

NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
@@ -743,10 +746,11 @@ template <typename Config> class SizeClassAllocator32 {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();

- DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+ DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
const uptr BytesInFreeList =
Sci->AllocatedUser -
- (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
+ (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
+     BlockSize;

if (UNLIKELY(BytesInFreeList == 0))
return 0;
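
As a quick check on the arithmetic above (illustrative numbers only, not from the source): if this size class has AllocatedUser = 64 KiB, BlockSize = 32, PoppedBlocks = 1000 and PushedBlocks = 900, then 100 blocks are live and BytesInFreeList = 65536 - 100 * 32 = 62336, i.e. the bytes still sitting in the freelist that releaseToOSMaybe considers for release.
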
@@ -823,7 +827,7 @@ template <typename Config> class SizeClassAllocator32 {
auto DecompactPtr = [](CompactPtrT CompactPtr) {
return reinterpret_cast<uptr>(CompactPtr);
};
- for (BatchGroup &BG : Sci->FreeList) {
+ for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
// The `GroupSize` may not be divided by `BlockSize`, which means there is
// an unused space at the end of Region. Exclude that space to avoid