[scudo][standalone] Correct min/max region indices
The original code to keep track of the minimum and maximum indices
of allocated 32-bit primary regions was sketchy at best.

`MinRegionIndex` & `MaxRegionIndex` were shared across all size
classes, and could (theoretically) have been updated concurrently. This
didn't materialize anywhere I could see, but it's still not proper.

This changes those min/max indices by making them class-specific rather
than global: classes are locked while growing, so there is no
concurrency there. This also allows simplifying some of the 32-bit
release code, which no longer has to walk all the regions to
find the proper min/max. The iterate and unmap paths no longer have access to
a global min/max, but they aren't used as much, so this is fine.
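A minimal sketch of the new scheme (the constants, the std::mutex, and the
helper names recordRegion/globalRange below are illustrative stand-ins, not
the actual scudo code):

```cpp
#include <algorithm>
#include <cstdint>

// Hypothetical stand-ins for the real scudo configuration.
constexpr uintptr_t NumRegions = 1 << 12;
constexpr uintptr_t NumClasses = 32;

struct SizeClassInfo {
  // Per-class bounds; the class is locked while growing, so no data race.
  // (The real SizeClassInfo also holds a mutex, free list, stats, etc.)
  uintptr_t MinRegionIndex = NumRegions;
  uintptr_t MaxRegionIndex = 0;
};

SizeClassInfo Classes[NumClasses];

// Equivalent of the per-class update done in allocateRegion(): the caller
// already holds the class lock, so touching the bounds is safe.
void recordRegion(SizeClassInfo &Sci, uintptr_t RegionIndex) {
  Sci.MinRegionIndex = std::min(Sci.MinRegionIndex, RegionIndex);
  Sci.MaxRegionIndex = std::max(Sci.MaxRegionIndex, RegionIndex);
}

// The rare global consumers (unmapTestOnly, iterateOverBlocks) rebuild a
// global range by folding the per-class bounds instead of reading globals.
void globalRange(uintptr_t &Min, uintptr_t &Max) {
  Min = NumRegions;
  Max = 0;
  for (uintptr_t I = 0; I < NumClasses; I++) {
    Min = std::min(Min, Classes[I].MinRegionIndex);
    Max = std::max(Max, Classes[I].MaxRegionIndex);
  }
}
```

releaseToOSMaybe() benefits the same way: it can read Sci->MinRegionIndex and
Sci->MaxRegionIndex directly instead of scanning PossibleRegions for the first
and last region belonging to the class.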

Differential Revision: https://reviews.llvm.org/D91106
Kostya Kortchinsky committed Nov 16, 2020
1 parent 7c62c63 commit f0703cb
Showing 1 changed file with 49 additions and 47 deletions.
96 changes: 49 additions & 47 deletions compiler-rt/lib/scudo/standalone/primary32.h
@@ -70,7 +70,6 @@ class SizeClassAllocator32 {
reportError("SizeClassAllocator32 is not supported on Fuchsia");

PossibleRegions.initLinkerInitialized();
MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.

u32 Seed;
const u64 Time = getMonotonicTime();
@@ -81,6 +80,8 @@ class SizeClassAllocator32 {
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
Sci->RandState = getRandomU32(&Seed);
// Sci->MaxRegionIndex is already initialized to 0.
Sci->MinRegionIndex = NumRegions;
// See comment in the 64-bit primary about releasing smaller size classes.
Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
(getSizeByClassId(I) >= (PageSize / 32));
@@ -98,7 +99,15 @@ class SizeClassAllocator32 {
while (NumberOfStashedRegions > 0)
unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
RegionSize);
for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
if (Sci->MinRegionIndex < MinRegionIndex)
MinRegionIndex = Sci->MinRegionIndex;
if (Sci->MaxRegionIndex > MaxRegionIndex)
MaxRegionIndex = Sci->MaxRegionIndex;
}
for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
if (PossibleRegions[I])
unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
PossibleRegions.unmapTestOnly();
@@ -156,6 +165,14 @@ class SizeClassAllocator32 {
}

template <typename F> void iterateOverBlocks(F Callback) {
uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
if (Sci->MinRegionIndex < MinRegionIndex)
MinRegionIndex = Sci->MinRegionIndex;
if (Sci->MaxRegionIndex > MaxRegionIndex)
MaxRegionIndex = Sci->MaxRegionIndex;
}
for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
if (PossibleRegions[I] &&
(PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
@@ -207,18 +224,14 @@ class SizeClassAllocator32 {
return TotalReleasedBytes;
}

static bool useMemoryTagging(Options Options) {
(void)Options;
return false;
}
static bool useMemoryTagging(UNUSED Options Options) { return false; }
void disableMemoryTagging() {}

const char *getRegionInfoArrayAddress() const { return nullptr; }
static uptr getRegionInfoArraySize() { return 0; }

static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
(void)RegionInfoData;
(void)Ptr;
static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
UNUSED uptr Ptr) {
return {};
}

@@ -252,6 +265,10 @@ class SizeClassAllocator32 {
bool CanRelease;
u32 RandState;
uptr AllocatedUser;
// Lowest & highest region index allocated for this size class, to avoid
// looping through the whole NumRegions.
uptr MinRegionIndex;
uptr MaxRegionIndex;
ReleaseToOsInfo ReleaseInfo;
};
static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
@@ -287,7 +304,7 @@ class SizeClassAllocator32 {
return Region;
}

uptr allocateRegion(uptr ClassId) {
uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
uptr Region = 0;
{
@@ -298,11 +315,12 @@ class SizeClassAllocator32 {
if (!Region)
Region = allocateRegionSlow();
if (LIKELY(Region)) {
// Sci->Mutex is held by the caller, updating the Min/Max is safe.
const uptr RegionIndex = computeRegionId(Region);
if (RegionIndex < MinRegionIndex)
MinRegionIndex = RegionIndex;
if (RegionIndex > MaxRegionIndex)
MaxRegionIndex = RegionIndex;
if (RegionIndex < Sci->MinRegionIndex)
Sci->MinRegionIndex = RegionIndex;
if (RegionIndex > Sci->MaxRegionIndex)
Sci->MaxRegionIndex = RegionIndex;
PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
}
return Region;
@@ -327,7 +345,7 @@ class SizeClassAllocator32 {
Offset = Sci->CurrentRegionAllocated;
} else {
DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
Region = allocateRegion(ClassId);
Region = allocateRegion(Sci, ClassId);
if (UNLIKELY(!Region))
return nullptr;
C->getStats().add(StatMapped, RegionSize);
@@ -411,7 +429,7 @@ class SizeClassAllocator32 {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();

CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
const uptr BytesInFreeList =
Sci->AllocatedUser -
(Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
@@ -446,50 +464,34 @@ class SizeClassAllocator32 {
}
}

DCHECK_GT(MinRegionIndex, 0U);
uptr First = 0;
for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
if (PossibleRegions[I] - 1U == ClassId) {
First = I;
break;
}
}
uptr Last = 0;
for (uptr I = MaxRegionIndex; I >= MinRegionIndex; I--) {
if (PossibleRegions[I] - 1U == ClassId) {
Last = I;
break;
}
}
const uptr First = Sci->MinRegionIndex;
const uptr Last = Sci->MaxRegionIndex;
DCHECK_NE(Last, 0U);
DCHECK_LE(First, Last);
uptr TotalReleasedBytes = 0;
const uptr Base = First * RegionSize;
const uptr NumberOfRegions = Last - First + 1U;
ReleaseRecorder Recorder(Base);
auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
};
if (First && Last) {
const uptr Base = First * RegionSize;
const uptr NumberOfRegions = Last - First + 1U;
ReleaseRecorder Recorder(Base);
releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
BlockSize, &Recorder, SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
}
releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
BlockSize, &Recorder, SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
}
Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();

return TotalReleasedBytes;
}

SizeClassInfo SizeClassInfoArray[NumClasses];

// Track the regions in use, 0 is unused, otherwise store ClassId + 1.
ByteMap PossibleRegions;
// Keep track of the lowest & highest regions allocated to avoid looping
// through the whole NumRegions.
uptr MinRegionIndex;
uptr MaxRegionIndex;
atomic_s32 ReleaseToOsIntervalMs;
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.
