diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h index e4ba6ee7886f2..b013f03a73ae4 100644 --- a/compiler-rt/lib/scudo/standalone/combined.h +++ b/compiler-rt/lib/scudo/standalone/combined.h @@ -245,12 +245,14 @@ class Allocator { // - unlinking the local stats from the global ones (destroying the cache does // the last two items). void commitBack(TSD *TSD) { + TSD->assertLocked(/*BypassCheck=*/true); Quarantine.drain(&TSD->getQuarantineCache(), QuarantineCallback(*this, TSD->getCache())); TSD->getCache().destroy(&Stats); } void drainCache(TSD *TSD) { + TSD->assertLocked(/*BypassCheck=*/true); Quarantine.drainAndRecycle(&TSD->getQuarantineCache(), QuarantineCallback(*this, TSD->getCache())); TSD->getCache().drain(); @@ -363,6 +365,7 @@ class Allocator { DCHECK_NE(ClassId, 0U); bool UnlockRequired; auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired); + TSD->assertLocked(/*BypassCheck=*/!UnlockRequired); Block = TSD->getCache().allocate(ClassId); // If the allocation failed, the most likely reason with a 32-bit primary // is the region being full. 
In that event, retry in each successively @@ -1147,6 +1150,7 @@ class Allocator { if (LIKELY(ClassId)) { bool UnlockRequired; auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired); + TSD->assertLocked(/*BypassCheck=*/!UnlockRequired); const bool CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin); if (UnlockRequired) @@ -1166,6 +1170,7 @@ class Allocator { } else { bool UnlockRequired; auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired); + TSD->assertLocked(/*BypassCheck=*/!UnlockRequired); Quarantine.put(&TSD->getQuarantineCache(), QuarantineCallback(*this, TSD->getCache()), Ptr, Size); if (UnlockRequired) diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp index 1143aaab8371d..7958e9dabf60d 100644 --- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp @@ -512,6 +512,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS { bool UnlockRequired; auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired); + TSD->assertLocked(/*BypassCheck=*/!UnlockRequired); EXPECT_TRUE(!TSD->getCache().isEmpty()); TSD->getCache().drain(); EXPECT_TRUE(TSD->getCache().isEmpty()); @@ -536,6 +537,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS { bool UnlockRequired; auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired); + TSD->assertLocked(/*BypassCheck=*/!UnlockRequired); EXPECT_TRUE(TSD->getCache().isEmpty()); EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U); EXPECT_TRUE(Allocator->getQuarantine()->isEmpty()); @@ -842,6 +844,7 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) { bool UnlockRequired; auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired); + TSD->assertLocked(/*BypassCheck=*/!UnlockRequired); TSD->getCache().drain(); Allocator->releaseToOS(scudo::ReleaseToOS::Force); diff --git 
a/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp b/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp index 5953d759a69a9..fad8fcf900771 100644 --- a/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp @@ -101,6 +101,7 @@ template static void testRegistry() { bool UnlockRequired; auto TSD = Registry->getTSDAndLock(&UnlockRequired); + TSD->assertLocked(/*BypassCheck=*/!UnlockRequired); EXPECT_NE(TSD, nullptr); EXPECT_EQ(TSD->getCache().Canary, 0U); if (UnlockRequired) @@ -108,6 +109,7 @@ template static void testRegistry() { Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false); TSD = Registry->getTSDAndLock(&UnlockRequired); + TSD->assertLocked(/*BypassCheck=*/!UnlockRequired); EXPECT_NE(TSD, nullptr); EXPECT_EQ(TSD->getCache().Canary, 0U); memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache())); @@ -137,6 +139,7 @@ template static void stressCache(AllocatorT *Allocator) { Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false); bool UnlockRequired; auto TSD = Registry->getTSDAndLock(&UnlockRequired); + TSD->assertLocked(/*BypassCheck=*/!UnlockRequired); EXPECT_NE(TSD, nullptr); // For an exclusive TSD, the cache should be empty. We cannot guarantee the // same for a shared TSD. 
@@ -195,6 +198,7 @@ static void stressSharedRegistry(MockAllocator *Allocator) { bool UnlockRequired; for (scudo::uptr I = 0; I < 4096U; I++) { auto TSD = Registry->getTSDAndLock(&UnlockRequired); + TSD->assertLocked(/*BypassCheck=*/!UnlockRequired); EXPECT_NE(TSD, nullptr); Set.insert(reinterpret_cast(TSD)); if (UnlockRequired) diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h index f4fa545de5e04..b2108a01900bc 100644 --- a/compiler-rt/lib/scudo/standalone/tsd.h +++ b/compiler-rt/lib/scudo/standalone/tsd.h @@ -53,8 +53,14 @@ template struct alignas(SCUDO_CACHE_LINE_SIZE) TSD { inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); } inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); } - void commitBack(Allocator *Instance) ASSERT_CAPABILITY(Mutex) { - Instance->commitBack(this); + void commitBack(Allocator *Instance) { Instance->commitBack(this); } + + // As noted in the comments attached to `getCache()`, the TSD does not always + // need to be locked. In that case, we skip the check only until the lock is + // acquired on all paths. + void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) { + if (SCUDO_DEBUG && !BypassCheck) + Mutex.assertHeld(); } // Ideally, we may want to assert that all the operations on @@ -66,11 +72,8 @@ template struct alignas(SCUDO_CACHE_LINE_SIZE) TSD { // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but acquiring // TSD doesn't always require holding the lock. Add this assertion while the // lock is always acquired.
- typename Allocator::CacheT &getCache() ASSERT_CAPABILITY(Mutex) { - return Cache; - } - typename Allocator::QuarantineCacheT &getQuarantineCache() - ASSERT_CAPABILITY(Mutex) { + typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; } + typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) { return QuarantineCache; } diff --git a/compiler-rt/lib/scudo/standalone/tsd_shared.h b/compiler-rt/lib/scudo/standalone/tsd_shared.h index dcb0948ad78fa..1bca578ee14be 100644 --- a/compiler-rt/lib/scudo/standalone/tsd_shared.h +++ b/compiler-rt/lib/scudo/standalone/tsd_shared.h @@ -120,6 +120,11 @@ struct TSDRegistrySharedT { TSDsArraySize); for (uptr I = 0; I < NumberOfTSDs; ++I) { TSDs[I].lock(); + // Ideally, we would mark TSD::lock()/TSD::unlock() with proper thread + // annotations. However, given that the TSD is only locked on the shared + // path, do the assertion in a separate call to avoid confusing the + // analyzer. + TSDs[I].assertLocked(/*BypassCheck=*/true); Str->append(" Shared TSD[%zu]:\n", I); TSDs[I].getCache().getStats(Str); TSDs[I].unlock();