[scudo] separate cache retrieval logic
Split cache::retrieve() into two functions: one that retrieves the
cached block, and another that sets the header and MTE environment.
They were split so that retrieve() can more easily be changed in the
future, and so that it has the single purpose of retrieving a
CachedBlock.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D155660
Fernando authored and ChiaHungDuan committed Jul 25, 2023
1 parent 7a4968b commit 4c6b8bb
Showing 1 changed file with 45 additions and 53 deletions.
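
To illustrate the new division of labor before diving into the diff, here is a minimal, self-contained C++ sketch. It is not the scudo source: ToyCache and Header below are hypothetical stand-ins for the real cache and LargeBlock::Header, and the real setHeader() additionally resets page permissions and re-tags memory when MTE is enabled.

// Minimal sketch, not the scudo source: ToyCache and Header are
// hypothetical stand-ins. After the split, retrieve() only finds and
// removes a suitable CachedBlock; setHeader() fills in the header.
#include <cstddef>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

struct CachedBlock { // same fields as the struct hoisted to file scope
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  uint64_t Time = 0;
};

struct Header { // stand-in for LargeBlock::Header
  uptr CommitBase = 0;
  uptr CommitSize = 0;
};

struct ToyCache { // hypothetical single-entry cache
  CachedBlock Slot;
  // retrieve() now has one job: hand back a cached block that is
  // large enough, removing it from the cache.
  bool retrieve(uptr Size, CachedBlock &Entry) {
    if (!Slot.CommitBase || Size > Slot.CommitSize)
      return false;
    Entry = Slot;
    Slot.CommitBase = 0; // mark the slot empty
    return true;
  }
};

// setHeader() owns the second half of the old retrieve(): deriving
// Zeroed and copying the cached fields into the header. (The real
// version also handles MTE permissions and memory tags.)
void setHeader(const CachedBlock &Entry, Header *H, bool &Zeroed) {
  Zeroed = Entry.Time == 0;
  H->CommitBase = Entry.CommitBase;
  H->CommitSize = Entry.CommitSize;
}

int main() {
  ToyCache Cache;
  Cache.Slot = {/*CommitBase=*/0x10000, /*CommitSize=*/0x4000,
                /*BlockBegin=*/0x10040, /*Time=*/0};
  CachedBlock Entry;
  Header H;
  bool Zeroed = false;
  if (Cache.retrieve(/*Size=*/0x2000, Entry)) {
    setHeader(Entry, &H, Zeroed);
    std::printf("base=%#zx size=%#zx zeroed=%d\n", (size_t)H.CommitBase,
                (size_t)H.CommitSize, (int)Zeroed);
  }
}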
compiler-rt/lib/scudo/standalone/secondary.h
@@ -72,13 +72,20 @@ static inline void unmap(LargeBlock::Header *H) {
   MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
 }
 
+namespace {
+struct CachedBlock {
+  uptr CommitBase = 0;
+  uptr CommitSize = 0;
+  uptr BlockBegin = 0;
+  MemMapT MemMap = {};
+  u64 Time = 0;
+};
+} // namespace
+
 template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
-  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
-                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
-    return false;
-  }
+  bool retrieve(UNUSED uptr Size, UNUSED CachedBlock &Entry) { return false; }
   void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
   bool canCache(UNUSED uptr Size) { return false; }
   void disable() {}
@@ -248,62 +255,26 @@ template <typename Config> class MapAllocatorCache {
     Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
   }
 
-  bool retrieve(Options Options, uptr Size, uptr Alignment,
-                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
-    const uptr PageSize = getPageSizeCached();
+  bool retrieve(uptr Size, CachedBlock &Entry) EXCLUDES(Mutex) {
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
     bool Found = false;
-    CachedBlock Entry;
-    uptr HeaderPos = 0;
     {
       ScopedLock L(Mutex);
       if (EntriesCount == 0)
         return false;
       for (u32 I = 0; I < MaxCount; I++) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        if (!CommitBase)
-          continue;
-        const uptr CommitSize = Entries[I].CommitSize;
-        const uptr AllocPos =
-            roundDown(CommitBase + CommitSize - Size, Alignment);
-        HeaderPos =
-            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
-        if (HeaderPos > CommitBase + CommitSize)
+        if (!Entries[I].CommitBase)
           continue;
-        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+        if (Size > Entries[I].CommitSize)
           continue;
-        }
         Found = true;
         Entry = Entries[I];
         Entries[I].CommitBase = 0;
         EntriesCount--;
         break;
       }
     }
-    if (!Found)
-      return false;
-
-    *H = reinterpret_cast<LargeBlock::Header *>(
-        LargeBlock::addHeaderTag<Config>(HeaderPos));
-    *Zeroed = Entry.Time == 0;
-    if (useMemoryTagging<Config>(Options))
-      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
-    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
-    if (useMemoryTagging<Config>(Options)) {
-      if (*Zeroed) {
-        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
-                  NewBlockBegin);
-      } else if (Entry.BlockBegin < NewBlockBegin) {
-        storeTags(Entry.BlockBegin, NewBlockBegin);
-      } else {
-        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
-      }
-    }
-    (*H)->CommitBase = Entry.CommitBase;
-    (*H)->CommitSize = Entry.CommitSize;
-    (*H)->MemMap = Entry.MemMap;
-    return true;
+    return Found;
   }
 
   bool canCache(uptr Size) {
@@ -383,14 +354,6 @@ template <typename Config> class MapAllocatorCache {
     }
   }
 
-  struct CachedBlock {
-    uptr CommitBase = 0;
-    uptr CommitSize = 0;
-    uptr BlockBegin = 0;
-    MemMapT MemMap = {};
-    u64 Time = 0;
-  };
-
   void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
     if (!Entry.CommitBase || !Entry.Time)
       return;
@@ -476,6 +439,27 @@ template <typename Config> class MapAllocator {
     }
   }
 
+  inline void setHeader(Options Options, CachedBlock &Entry,
+                        LargeBlock::Header *H, bool &Zeroed) {
+    Zeroed = Entry.Time == 0;
+    if (useMemoryTagging<Config>(Options)) {
+      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+      // Block begins after the LargeBlock::Header
+      uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
+      if (Zeroed) {
+        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+                  NewBlockBegin);
+      } else if (Entry.BlockBegin < NewBlockBegin) {
+        storeTags(Entry.BlockBegin, NewBlockBegin);
+      } else {
+        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
+      }
+    }
+    H->CommitBase = Entry.CommitBase;
+    H->CommitSize = Entry.CommitSize;
+    H->MemMap = Entry.MemMap;
+  }
+
   bool canCache(uptr Size) { return Cache.canCache(Size); }
 
   bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }
@@ -530,7 +514,15 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
   if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
     LargeBlock::Header *H;
     bool Zeroed;
-    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
+    CachedBlock Entry;
+    if (Cache.retrieve(RoundedSize, Entry)) {
+      const uptr AllocPos =
+          roundDown(Entry.CommitBase + Entry.CommitSize - Size, Alignment);
+      const uptr HeaderPos =
+          AllocPos - LargeBlock::getHeaderSize() - Chunk::getHeaderSize();
+      H = reinterpret_cast<LargeBlock::Header *>(
+          LargeBlock::addHeaderTag<Config>(HeaderPos));
+      setHeader(Options, Entry, H, Zeroed);
       const uptr BlockEnd = H->CommitBase + H->CommitSize;
       if (BlockEndPtr)
         *BlockEndPtr = BlockEnd;
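
To make the caller-side placement math concrete, take some hypothetical numbers: a retrieved entry with CommitBase = 0x10000 and CommitSize = 0x4000, a request with Size = 0x2000 and Alignment = 0x100, and assume the LargeBlock and Chunk headers together occupy 0x40 bytes. Then:

    AllocPos  = roundDown(0x10000 + 0x4000 - 0x2000, 0x100) = 0x12000
    HeaderPos = 0x12000 - 0x40                              = 0x11fc0

The block lands as high as possible within the committed region and the headers immediately precede it; after this change, retrieve() needs none of this information.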
