[scudo] Add the thread-safety annotations
This CL adds the proper thread-safety annotations for most of the
functions and variables. However, given the restrictions of the current
architecture, in some cases the annotations cannot be applied easily.
The following are two exceptions:

1. enable()/disable(): Many structures in scudo are enabled/disabled by
   acquiring the lock in each instance. This makes those structures act
   like a `lock`. We can't mark those functions with ACQUIRE()/RELEASE(),
   because that would turn the entire allocator into another `lock`,
   which in turn would imply we need to *acquire* the `allocator` before
   each malloc et al. request. Therefore, adding a variable that tracks
   the status of those structures may be a better way to cooperate with
   the thread-safety analysis (see the sketch after this list).

2. TSD/TSD shared/TSD exclusive: These three have similar restrictions
   to the above. In addition, a TSD doesn't always need to be released
   if it's a thread-local instance. However, the thread-safety analysis
   doesn't support conditional branches, which means we can't place the
   proper annotations around the uses of TSDs. We may consider making
   the behavior consistent, which would also simplify the code
   structure.
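
A minimal sketch of the status-variable idea from item 1 (hypothetical
code, not part of this change): the functions stay excluded from the
analysis, and the lock state is mirrored in a flag instead of making
the allocator itself a capability.

  class Allocator {
  public:
    // Excluded from the analysis so the Allocator doesn't become a `lock`.
    void disable() NO_THREAD_SAFETY_ANALYSIS {
      Mutex.lock();
      Disabled = true;
    }
    void enable() NO_THREAD_SAFETY_ANALYSIS {
      Disabled = false;
      Mutex.unlock();
    }
    bool isDisabled() const { return Disabled; }

  private:
    HybridMutex Mutex;
    bool Disabled = false;
  };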

This CL is meant to introduce the annotations with the least amount of
code refactoring, so only trivial thread-safety issues are addressed
here. For example, where a lock was not acquired before accessing a
guarded variable, a ScopedLock is inserted. The remaining issues are
left to later changes.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D140706
ChiaHungDuan committed Feb 15, 2023
1 parent 2b8cb7d commit 6a4c395
Showing 20 changed files with 315 additions and 151 deletions.
3 changes: 3 additions & 0 deletions compiler-rt/lib/scudo/standalone/CMakeLists.txt
@@ -30,6 +30,9 @@ else()
list(APPEND SCUDO_CFLAGS -O3)
endif()

append_list_if(COMPILER_RT_HAS_WTHREAD_SAFETY_FLAG -Werror=thread-safety
SCUDO_CFLAGS)

set(SCUDO_LINK_FLAGS)

list(APPEND SCUDO_LINK_FLAGS -Wl,-z,defs,-z,now,-z,relro)
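
For reference, -Werror=thread-safety promotes Clang's thread-safety
warnings to errors. Below is a self-contained example (hypothetical, not
from the scudo sources) of the class of bug the analysis reports, written
with the raw Clang attributes that macros such as GUARDED_BY expand to:

  // clang++ -fsyntax-only -Wthread-safety example.cpp
  struct __attribute__((capability("mutex"))) Mutex {
    void lock() __attribute__((acquire_capability()));
    void unlock() __attribute__((release_capability()));
  };

  struct Counter {
    Mutex M;
    int Value __attribute__((guarded_by(M)));

    void broken() {
      Value++; // warning: writing variable 'Value' requires holding mutex 'M'
    }
    void fixed() {
      M.lock();
      Value++; // OK: the analysis knows 'M' is held here.
      M.unlock();
    }
  };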
12 changes: 7 additions & 5 deletions compiler-rt/lib/scudo/standalone/combined.h
@@ -305,7 +305,7 @@ class Allocator {

NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
uptr Alignment = MinAlignment,
bool ZeroContents = false) {
bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();

const Options Options = Primary.Options.load();
@@ -389,9 +389,10 @@ class Allocator {
if (UnlockRequired)
TSD->unlock();
}
if (UNLIKELY(ClassId == 0))
if (UNLIKELY(ClassId == 0)) {
Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
FillContents);
}

if (UNLIKELY(!Block)) {
if (Options.get(OptionBit::MayReturnNull))
@@ -697,7 +698,7 @@ class Allocator {
// TODO(kostyak): disable() is currently best-effort. There are some small
// windows of time when an allocation could still succeed after
// this function finishes. We will revisit that later.
void disable() {
void disable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
GuardedAlloc.disable();
@@ -709,7 +710,7 @@ class Allocator {
Secondary.disable();
}

void enable() {
void enable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
Secondary.enable();
Primary.enable();
@@ -1124,7 +1125,8 @@ class Allocator {
}

void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
Chunk::UnpackedHeader *Header, uptr Size) {
Chunk::UnpackedHeader *Header,
uptr Size) NO_THREAD_SAFETY_ANALYSIS {
void *Ptr = getHeaderTaggedPointer(TaggedPtr);
Chunk::UnpackedHeader NewHeader = *Header;
// If the quarantine is disabled, the actual size of a chunk is 0 or larger
2 changes: 2 additions & 0 deletions compiler-rt/lib/scudo/standalone/fuchsia.cpp
@@ -195,6 +195,8 @@ void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
sync_mutex_unlock(&M);
}

void HybridMutex::assertHeldImpl() __TA_NO_THREAD_SAFETY_ANALYSIS {}

u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }

u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
5 changes: 5 additions & 0 deletions compiler-rt/lib/scudo/standalone/linux.cpp
@@ -11,6 +11,7 @@
#if SCUDO_LINUX

#include "common.h"
#include "internal_defs.h"
#include "linux.h"
#include "mutex.h"
#include "string_utils.h"
@@ -128,6 +129,10 @@ void HybridMutex::unlock() {
}
}

void HybridMutex::assertHeldImpl() {
CHECK(atomic_load(&M, memory_order_acquire) != Unlocked);
}

u64 getMonotonicTime() {
timespec TS;
clock_gettime(CLOCK_MONOTONIC, &TS);
28 changes: 20 additions & 8 deletions compiler-rt/lib/scudo/standalone/mutex.h
@@ -11,6 +11,7 @@

#include "atomic_helpers.h"
#include "common.h"
#include "thread_annotations.h"

#include <string.h>

@@ -20,10 +21,10 @@

namespace scudo {

class HybridMutex {
class CAPABILITY("mutex") HybridMutex {
public:
bool tryLock();
NOINLINE void lock() {
bool tryLock() TRY_ACQUIRE(true);
NOINLINE void lock() ACQUIRE() {
if (LIKELY(tryLock()))
return;
// The compiler may try to fully unroll the loop, ending up in a
@@ -40,9 +41,20 @@ class HybridMutex {
}
lockSlow();
}
void unlock();
void unlock() RELEASE();

// TODO(chiahungduan): In general, we may want to assert the owner of lock as
// well. Given the current uses of HybridMutex, it's acceptable without
// asserting the owner. Re-evaluate this when we have certain scenarios which
// requires a more fine-grained lock granularity.
ALWAYS_INLINE void assertHeld() ASSERT_CAPABILITY(this) {
if (SCUDO_DEBUG)
assertHeldImpl();
}

private:
void assertHeldImpl();

static constexpr u8 NumberOfTries = 8U;
static constexpr u8 NumberOfYields = 8U;

@@ -52,13 +64,13 @@ class HybridMutex {
sync_mutex_t M = {};
#endif

void lockSlow();
void lockSlow() ACQUIRE();
};

class ScopedLock {
class SCOPED_CAPABILITY ScopedLock {
public:
explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
~ScopedLock() { Mutex.unlock(); }
explicit ScopedLock(HybridMutex &M) ACQUIRE(M) : Mutex(M) { Mutex.lock(); }
~ScopedLock() RELEASE() { Mutex.unlock(); }

private:
HybridMutex &Mutex;
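
The CAPABILITY, ACQUIRE, RELEASE, TRY_ACQUIRE, SCOPED_CAPABILITY, and
ASSERT_CAPABILITY macros above come from the newly included
thread_annotations.h, which this diff doesn't show. A typical mapping of
such macros onto the underlying Clang attributes looks like the sketch
below; scudo's actual definitions may differ:

  #if defined(__clang__)
  #define THREAD_ANNOTATION_(x) __attribute__((x))
  #else
  #define THREAD_ANNOTATION_(x)
  #endif

  #define CAPABILITY(x) THREAD_ANNOTATION_(capability(x))
  #define SCOPED_CAPABILITY THREAD_ANNOTATION_(scoped_lockable)
  #define GUARDED_BY(x) THREAD_ANNOTATION_(guarded_by(x))
  #define ACQUIRE(...) THREAD_ANNOTATION_(acquire_capability(__VA_ARGS__))
  #define TRY_ACQUIRE(...) THREAD_ANNOTATION_(try_acquire_capability(__VA_ARGS__))
  #define RELEASE(...) THREAD_ANNOTATION_(release_capability(__VA_ARGS__))
  #define REQUIRES(...) THREAD_ANNOTATION_(requires_capability(__VA_ARGS__))
  #define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_(assert_capability(x))
  #define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_(no_thread_safety_analysis)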
66 changes: 35 additions & 31 deletions compiler-rt/lib/scudo/standalone/primary32.h
@@ -18,6 +18,7 @@
#include "report.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

@@ -62,7 +63,7 @@ template <typename Config> class SizeClassAllocator32 {

static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

void init(s32 ReleaseToOsInterval) {
void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
if (SCUDO_FUCHSIA)
reportError("SizeClassAllocator32 is not supported on Fuchsia");

@@ -86,7 +87,7 @@ template <typename Config> class SizeClassAllocator32 {
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}

void unmapTestOnly() {
void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
while (NumberOfStashedRegions > 0)
unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
RegionSize);
@@ -121,11 +122,11 @@ template <typename Config> class SizeClassAllocator32 {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
ScopedLock L(Sci->Mutex);
TransferBatch *B = popBatchImpl(C, ClassId);
TransferBatch *B = popBatchImpl(C, ClassId, Sci);
if (UNLIKELY(!B)) {
if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
return nullptr;
B = popBatchImpl(C, ClassId);
B = popBatchImpl(C, ClassId, Sci);
// if `populateFreeList` succeeded, we are supposed to get free blocks.
DCHECK_NE(B, nullptr);
}
@@ -149,7 +150,7 @@ template <typename Config> class SizeClassAllocator32 {
// the blocks.
if (Size == 1 && !populateFreeList(C, ClassId, Sci))
return;
pushBlocksImpl(C, ClassId, Array, Size);
pushBlocksImpl(C, ClassId, Sci, Array, Size);
Sci->Stats.PushedBlocks += Size;
return;
}
@@ -173,14 +174,14 @@ template <typename Config> class SizeClassAllocator32 {
}

ScopedLock L(Sci->Mutex);
pushBlocksImpl(C, ClassId, Array, Size, SameGroup);
pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);

Sci->Stats.PushedBlocks += Size;
if (ClassId != SizeClassMap::BatchClassId)
releaseToOSMaybe(Sci, ClassId);
}

void disable() {
void disable() NO_THREAD_SAFETY_ANALYSIS {
// The BatchClassId must be locked last since other classes can use it.
for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
@@ -192,7 +193,7 @@ template <typename Config> class SizeClassAllocator32 {
PossibleRegions.disable();
}

void enable() {
void enable() NO_THREAD_SAFETY_ANALYSIS {
PossibleRegions.enable();
RegionsStashMutex.unlock();
getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
@@ -203,7 +204,8 @@ template <typename Config> class SizeClassAllocator32 {
}
}

template <typename F> void iterateOverBlocks(F Callback) {
template <typename F>
void iterateOverBlocks(F Callback) NO_THREAD_SAFETY_ANALYSIS {
uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
@@ -241,7 +243,7 @@ template <typename Config> class SizeClassAllocator32 {
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
ScopedLock L(Sci->Mutex);
getStats(Str, I, 0);
getStats(Str, I, Sci, 0);
}
}

@@ -301,17 +303,17 @@ template <typename Config> class SizeClassAllocator32 {

struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
HybridMutex Mutex;
SinglyLinkedList<BatchGroup> FreeList;
uptr CurrentRegion;
uptr CurrentRegionAllocated;
SizeClassStats Stats;
SinglyLinkedList<BatchGroup> FreeList GUARDED_BY(Mutex);
uptr CurrentRegion GUARDED_BY(Mutex);
uptr CurrentRegionAllocated GUARDED_BY(Mutex);
SizeClassStats Stats GUARDED_BY(Mutex);
u32 RandState;
uptr AllocatedUser;
uptr AllocatedUser GUARDED_BY(Mutex);
// Lowest & highest region index allocated for this size class, to avoid
// looping through the whole NumRegions.
uptr MinRegionIndex;
uptr MaxRegionIndex;
ReleaseToOsInfo ReleaseInfo;
uptr MinRegionIndex GUARDED_BY(Mutex);
uptr MaxRegionIndex GUARDED_BY(Mutex);
ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
};
static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
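
Note the recurring pattern in this file: helpers such as popBatchImpl,
pushBlocksImpl, and getStats now take the SizeClassInfo pointer
explicitly, so their REQUIRES clause can name the exact mutex instance
the caller must hold. A reduced sketch of the pattern (hypothetical
names):

  struct Info {
    HybridMutex Mutex;
    uptr Count GUARDED_BY(Mutex);
  };

  // The caller must already hold I->Mutex; the analysis checks each call site.
  void addLocked(Info *I, uptr N) REQUIRES(I->Mutex) { I->Count += N; }

  void add(Info *I, uptr N) {
    ScopedLock L(I->Mutex); // SCOPED_CAPABILITY: acquired here, released at scope exit.
    addLocked(I, N);        // OK: I->Mutex is known to be held.
  }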

@@ -346,7 +348,7 @@ template <typename Config> class SizeClassAllocator32 {
return Region;
}

uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
DCHECK_LT(ClassId, NumClasses);
uptr Region = 0;
{
@@ -399,10 +401,10 @@ template <typename Config> class SizeClassAllocator32 {
// `SameGroup=true` instead.
//
// The region mutex needs to be held while calling this method.
void pushBlocksImpl(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size,
bool SameGroup = false) {
void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
CompactPtrT *Array, u32 Size, bool SameGroup = false)
REQUIRES(Sci->Mutex) {
DCHECK_GT(Size, 0U);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);

auto CreateGroup = [&](uptr GroupId) {
BatchGroup *BG = nullptr;
@@ -528,8 +530,8 @@ template <typename Config> class SizeClassAllocator32 {
// group id will be considered first.
//
// The region mutex needs to be held while calling this method.
TransferBatch *popBatchImpl(CacheT *C, uptr ClassId) {
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
REQUIRES(Sci->Mutex) {
if (Sci->FreeList.empty())
return nullptr;

@@ -557,7 +559,8 @@ template <typename Config> class SizeClassAllocator32 {
return B;
}

NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci) {
NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
REQUIRES(Sci->Mutex) {
uptr Region;
uptr Offset;
// If the size-class currently has a region associated to it, use it. The
@@ -612,7 +615,7 @@ template <typename Config> class SizeClassAllocator32 {
// it only happens when it crosses the group size boundary. Instead of
// sorting them, treat them as same group here to avoid sorting the
// almost-sorted blocks.
pushBlocksImpl(C, ClassId, &ShuffleArray[I], N, /*SameGroup=*/true);
pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[I], N, /*SameGroup=*/true);
I += N;
}

@@ -633,8 +636,8 @@ template <typename Config> class SizeClassAllocator32 {
return true;
}

void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci, uptr Rss)
REQUIRES(Sci->Mutex) {
if (Sci->AllocatedUser == 0)
return;
const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
@@ -647,7 +650,7 @@ template <typename Config> class SizeClassAllocator32 {
}

NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
bool Force = false) {
bool Force = false) REQUIRES(Sci->Mutex) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();

@@ -753,14 +756,15 @@ template <typename Config> class SizeClassAllocator32 {
SizeClassInfo SizeClassInfoArray[NumClasses] = {};

// Track the regions in use, 0 is unused, otherwise store ClassId + 1.
// FIXME: There is no dedicated lock for `PossibleRegions`.
ByteMap PossibleRegions = {};
atomic_s32 ReleaseToOsIntervalMs = {};
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.
static constexpr uptr MaxStashedRegions = 4;
HybridMutex RegionsStashMutex;
uptr NumberOfStashedRegions = 0;
uptr RegionsStash[MaxStashedRegions] = {};
uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
};

} // namespace scudo
