Commit

Revert "[scudo] Store more blocks in each TransferBatch (#70390)"
This reverts commit 1865c7e.
ChiaHungDuan committed Feb 26, 2024
1 parent 3d2a918 commit 2868743
Showing 4 changed files with 141 additions and 159 deletions.
7 changes: 0 additions & 7 deletions compiler-rt/lib/scudo/standalone/allocator_common.h
@@ -40,7 +40,6 @@ template <class SizeClassAllocator> struct TransferBatch {
B->Count = static_cast<u16>(B->Count - N);
}
void clear() { Count = 0; }
bool empty() { return Count == 0; }
void add(CompactPtrT P) {
DCHECK_LT(Count, MaxNumCached);
Batch[Count++] = P;
@@ -49,12 +48,6 @@ template <class SizeClassAllocator> struct TransferBatch {
memcpy(Array, Batch, sizeof(Batch[0]) * Count);
clear();
}

void moveNToArray(CompactPtrT *Array, u16 N) {
DCHECK_LE(N, Count);
memcpy(Array, Batch + Count - N, sizeof(Batch[0]) * Count);
Count -= N;
}
u16 getCount() const { return Count; }
bool isEmpty() const { return Count == 0U; }
CompactPtrT get(u16 I) const {
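To make the TransferBatch change concrete, here is a minimal standalone sketch (toy types and an illustrative capacity, not scudo code) contrasting the whole-batch transfer that remains after this revert with the partial transfer it deletes. Note that a move of only N blocks would be expected to copy sizeof(Batch[0]) * N bytes; the removed hunk above shows * Count.

#include <cstdint>
#include <cstring>

struct ToyTransferBatch {
  static constexpr uint16_t MaxNumCached = 16; // illustrative capacity
  uintptr_t Batch[MaxNumCached];
  uint16_t Count = 0;

  // Whole-batch transfer (kept by this revert): the caller receives every
  // cached block and the batch is left empty.
  void moveToArray(uintptr_t *Array) {
    std::memcpy(Array, Batch, sizeof(Batch[0]) * Count);
    Count = 0;
  }

  // Partial transfer (removed by this revert): only the last N blocks are
  // copied out, so one batch can serve several smaller requests.
  void moveNToArray(uintptr_t *Array, uint16_t N) {
    std::memcpy(Array, Batch + Count - N, sizeof(Batch[0]) * N);
    Count = static_cast<uint16_t>(Count - N);
  }
};

int main() {
  ToyTransferBatch TB;
  TB.Batch[TB.Count++] = 0x1000;
  TB.Batch[TB.Count++] = 0x2000;
  uintptr_t Out[ToyTransferBatch::MaxNumCached];
  TB.moveNToArray(Out, 1); // Out[0] == 0x2000, one block stays behind
  TB.moveToArray(Out);     // drains the remaining block; TB.Count == 0
  return 0;
}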
106 changes: 51 additions & 55 deletions compiler-rt/lib/scudo/standalone/primary32.h
@@ -191,21 +191,38 @@ template <typename Config> class SizeClassAllocator32 {
return BlockSize > PageSize;
}

// Note that `MaxBlockCount` will be used when we support an arbitrary block
// count. For now, it's the same as the number of blocks stored in the
// `TransferBatch`.
u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
const u16 MaxBlockCount) {
UNUSED const u16 MaxBlockCount) {
TransferBatchT *B = popBatch(C, ClassId);
if (!B)
return 0;

const u16 Count = B->getCount();
DCHECK_GT(Count, 0U);
B->moveToArray(ToArray);

if (ClassId != SizeClassMap::BatchClassId)
C->deallocate(SizeClassMap::BatchClassId, B);

return Count;
}

TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
ScopedLock L(Sci->Mutex);

u16 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
if (UNLIKELY(PopCount == 0)) {
TransferBatchT *B = popBatchImpl(C, ClassId, Sci);
if (UNLIKELY(!B)) {
if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
return 0U;
PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
DCHECK_NE(PopCount, 0U);
return nullptr;
B = popBatchImpl(C, ClassId, Sci);
// If `populateFreeList` succeeded, we are supposed to get free blocks.
DCHECK_NE(B, nullptr);
}

return PopCount;
return B;
}
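The restored contract of popBlocks() is that MaxBlockCount is only a hint: one whole TransferBatch is drained per call, so the destination array has to be sized for a full batch no matter how few blocks were requested. A runnable toy of that behavior (illustrative names and sizes, not scudo code):

#include <cstdint>
#include <vector>

// Toy stand-in for the primary: popBlocks() ignores the requested count and
// hands back everything in the front batch, mirroring the restored
// `UNUSED const u16 MaxBlockCount` signature above.
struct ToyPrimary {
  std::vector<uintptr_t> FrontBatch = {0x1000, 0x2000, 0x3000, 0x4000};

  uint16_t popBlocks(uintptr_t *ToArray, uint16_t /*MaxBlockCount: hint only*/) {
    const uint16_t Count = static_cast<uint16_t>(FrontBatch.size());
    for (uint16_t I = 0; I < Count; ++I)
      ToArray[I] = FrontBatch[I];
    FrontBatch.clear();
    return Count;
  }
};

int main() {
  ToyPrimary P;
  uintptr_t Dest[16]; // must hold a whole batch even when fewer blocks are wanted
  const uint16_t Got = P.popBlocks(Dest, /*MaxBlockCount=*/2);
  return Got == 4 ? 0 : 1; // the caller received the whole batch, not 2 blocks
}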

// Push the array of free blocks to the designated batch group.
@@ -493,7 +510,7 @@ template <typename Config> class SizeClassAllocator32 {
// by TransferBatch is also free for use. We don't need to recycle the
// TransferBatch. Note that the correctness is maintained by the invariant,
//
// Each popBlocks() request returns the entire TransferBatch. Returning
The unit of each popBatch() request is an entire TransferBatch. Returning
// part of the blocks in a TransferBatch is invalid.
//
// This ensures that TransferBatch won't leak the address itself while it's
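One reading of this invariant, as a toy model (illustrative only, not scudo code): for the batch size class, a TransferBatch's own storage is recorded among the blocks it tracks, so the batch is self-contained. Popping the entire batch hands that storage out exactly when the batch stops being used, whereas popping only part of the blocks could hand out the batch's own block while the batch is still live.

#include <cassert>
#include <cstdint>
#include <vector>

struct SelfContainedBatch {
  std::vector<uintptr_t> Blocks;
};

int main() {
  auto *B = new SelfContainedBatch;
  // The batch tracks two ordinary blocks plus its own storage.
  B->Blocks = {0x1000, 0x2000, reinterpret_cast<uintptr_t>(B)};

  // Whole-batch pop: every block, including B's own storage, changes hands in
  // one step, and B is never consulted again afterwards.
  std::vector<uintptr_t> Popped = B->Blocks;
  assert(Popped.back() == reinterpret_cast<uintptr_t>(B));

  // A partial pop that handed out Popped.back() first would leave B live while
  // its own memory already belongs to the caller; forbidding partial pops of a
  // TransferBatch rules that state out.
  delete B; // stands in for the block returning to the pool
  return 0;
}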
@@ -617,7 +634,7 @@ template <typename Config> class SizeClassAllocator32 {
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));

return BG;
};
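The one-line change above is the per-batch capacity policy: with #70390, every BatchGroup let its batches fill up to the TransferBatch capacity, while the restored code caps each batch at the per-class cached count derived from the block size. A toy version of that idea (the constants and formula are illustrative, not scudo's actual getMaxCached()):

#include <algorithm>
#include <cstdint>

constexpr uint16_t MaxNumCached = 14;                  // toy TransferBatch capacity
constexpr uintptr_t MaxBytesCachedPerBatch = 1 << 13;  // toy byte budget per batch

// Cap the blocks per batch so a batch of large blocks does not pin too many
// bytes, mirroring the intent of CacheT::getMaxCached(getSizeByClassId(ClassId)).
uint16_t maxCachedPerBatch(uintptr_t BlockSize) {
  const uintptr_t ByBytes = std::max<uintptr_t>(1, MaxBytesCachedPerBatch / BlockSize);
  return static_cast<uint16_t>(std::min<uintptr_t>(MaxNumCached, ByBytes));
}

// e.g. maxCachedPerBatch(32) == 14 (capacity-bound),
//      maxCachedPerBatch(4096) == 2 (byte-bound);
// with #70390 the policy was effectively `return MaxNumCached;` for every class.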
@@ -709,11 +726,14 @@ template <typename Config> class SizeClassAllocator32 {
InsertBlocks(Cur, Array + Size - Count, Count);
}

u16 popBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
CompactPtrT *ToArray, const u16 MaxBlockCount)
// Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
// group id will be considered first.
//
// The region mutex needs to be held while calling this method.
TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
REQUIRES(Sci->Mutex) {
if (Sci->FreeListInfo.BlockList.empty())
return 0U;
return nullptr;

SinglyLinkedList<TransferBatchT> &Batches =
Sci->FreeListInfo.BlockList.front()->Batches;
@@ -726,57 +746,33 @@ template <typename Config> class SizeClassAllocator32 {
// Block used by `BatchGroup` is from BatchClassId. Turn the block into
// `TransferBatch` with single block.
TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
ToArray[0] =
compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
TB->clear();
TB->add(
compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
Sci->FreeListInfo.PoppedBlocks += 1;
return 1U;
return TB;
}

// So far, instead of always filling the blocks to `MaxBlockCount`, we only
// examine a single `TransferBatch` to minimize the time spent on the primary
// allocator. Besides, the sizes of `TransferBatch` and
// `CacheT::getMaxCached()` may also impact the time spent on accessing the
// primary allocator.
// TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
// blocks and/or adjust the size of `TransferBatch` according to
// `CacheT::getMaxCached()`.
TransferBatchT *B = Batches.front();
Batches.pop_front();
DCHECK_NE(B, nullptr);
DCHECK_GT(B->getCount(), 0U);

// BatchClassId should always take all blocks in the TransferBatch. Read the
// comment in `pushBatchClassBlocks()` for more details.
const u16 PopCount = ClassId == SizeClassMap::BatchClassId
? B->getCount()
: Min(MaxBlockCount, B->getCount());
B->moveNToArray(ToArray, PopCount);

// TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
// done without holding `Mutex`.
if (B->empty()) {
Batches.pop_front();
// `TransferBatch` of BatchClassId is self-contained, no need to
// deallocate. Read the comment in `pushBatchClassBlocks()` for more
// details.
if (Batches.empty()) {
BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
Sci->FreeListInfo.BlockList.pop_front();

// We don't keep BatchGroup with zero blocks to avoid empty-checking while
// allocating. Note that block used by constructing BatchGroup is recorded
// as free blocks in the last element of BatchGroup::Batches. Which means,
// once we pop the last TransferBatch, the block is implicitly
// deallocated.
if (ClassId != SizeClassMap::BatchClassId)
C->deallocate(SizeClassMap::BatchClassId, B);

if (Batches.empty()) {
BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
Sci->FreeListInfo.BlockList.pop_front();

// We don't keep BatchGroup with zero blocks to avoid empty-checking
// while allocating. Note that block used for constructing BatchGroup is
// recorded as free blocks in the last element of BatchGroup::Batches.
// Which means, once we pop the last TransferBatch, the block is
// implicitly deallocated.
if (ClassId != SizeClassMap::BatchClassId)
C->deallocate(SizeClassMap::BatchClassId, BG);
}
C->deallocate(SizeClassMap::BatchClassId, BG);
}

Sci->FreeListInfo.PoppedBlocks += PopCount;
return PopCount;
Sci->FreeListInfo.PoppedBlocks += B->getCount();
return B;
}

NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
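To summarize the hunk above: the removed popBlocksImpl() took at most Min(MaxBlockCount, batch count) blocks out of the front TransferBatch and could leave the rest in place, while the restored popBatchImpl() always unlinks the front TransferBatch whole and lets the caller copy it out. A runnable toy of the two free-list behaviors (illustrative containers, not scudo's intrusive lists, and without the BatchClassId special cases):

#include <algorithm>
#include <cstdint>
#include <deque>
#include <vector>

using Block = uintptr_t;
using ToyFreeList = std::deque<std::vector<Block>>; // each entry ~ one TransferBatch

// Restored behavior: unlink and return the whole front batch.
std::vector<Block> popBatchImpl(ToyFreeList &FL) {
  if (FL.empty())
    return {};
  std::vector<Block> B = std::move(FL.front());
  FL.pop_front();
  return B;
}

// Removed behavior: copy out at most MaxBlockCount blocks from the front
// batch, unlinking the batch only once it becomes empty.
uint16_t popBlocksImpl(ToyFreeList &FL, Block *ToArray, uint16_t MaxBlockCount) {
  if (FL.empty())
    return 0;
  std::vector<Block> &B = FL.front();
  const uint16_t N =
      std::min<uint16_t>(MaxBlockCount, static_cast<uint16_t>(B.size()));
  std::copy(B.end() - N, B.end(), ToArray); // take the last N, like moveNToArray
  B.resize(B.size() - N);
  if (B.empty())
    FL.pop_front();
  return N;
}

int main() {
  ToyFreeList FL = {{0x1000, 0x2000, 0x3000}};
  Block Dest[4];
  const uint16_t Got = popBlocksImpl(FL, Dest, 2); // removed path: takes 2, leaves 1
  std::vector<Block> Whole = popBatchImpl(FL);     // restored path: takes the rest
  return (Got == 2 && Whole.size() == 1) ? 0 : 1;
}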
