diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
index e32b37c3dcb57d..3fc2d337a7d93c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -372,9 +372,37 @@ class SCOPED_LOCK GenericScopedReadLock {
   void operator=(const GenericScopedReadLock &) = delete;
 };
 
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedRWLock {
+ public:
+  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
+      ACQUIRE(mu)
+      : mu_(mu), write_(write) {
+    if (write_)
+      mu_->Lock();
+    else
+      mu_->ReadLock();
+  }
+
+  ALWAYS_INLINE ~GenericScopedRWLock() RELEASE() {
+    if (write_)
+      mu_->Unlock();
+    else
+      mu_->ReadUnlock();
+  }
+
+ private:
+  MutexType *mu_;
+  bool write_;
+
+  GenericScopedRWLock(const GenericScopedRWLock &) = delete;
+  void operator=(const GenericScopedRWLock &) = delete;
+};
+
 typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
 typedef GenericScopedLock<Mutex> Lock;
 typedef GenericScopedReadLock<Mutex> ReadLock;
+typedef GenericScopedRWLock<Mutex> RWLock;
 
 }  // namespace __sanitizer
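The new GenericScopedRWLock picks the lock flavor at run time, which is what lets a call site such as AtomicCAS below take a read or a write lock depending on the memory order. A standalone sketch of the same idiom, using std::shared_mutex in place of the sanitizer Mutex (an assumption: the sanitizer type maps Lock/Unlock to exclusive and ReadLock/ReadUnlock to shared acquisition):

    #include <shared_mutex>

    class ScopedRWLock {
     public:
      explicit ScopedRWLock(std::shared_mutex *mu, bool write)
          : mu_(mu), write_(write) {
        if (write_)
          mu_->lock();         // exclusive, like Mutex::Lock
        else
          mu_->lock_shared();  // shared, like Mutex::ReadLock
      }
      ~ScopedRWLock() {
        if (write_)
          mu_->unlock();
        else
          mu_->unlock_shared();
      }
      ScopedRWLock(const ScopedRWLock &) = delete;
      ScopedRWLock &operator=(const ScopedRWLock &) = delete;

     private:
      std::shared_mutex *mu_;
      bool write_;
    };

Storing the flavor in a bool costs a branch in the destructor, but it keeps a single RAII type usable where the flavor is only known at run time.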
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 6afb25705ddfbd..c25de24426f89a 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -219,8 +219,7 @@ static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
 #endif
 
 template <typename T>
-static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
-                    morder mo) NO_THREAD_SAFETY_ANALYSIS {
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
   CHECK(IsLoadOrder(mo));
   // This fast-path is critical for performance.
   // Assume the access is atomic.
@@ -231,13 +230,13 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
   // Don't create sync object if it does not exist yet. For example, an atomic
   // pointer is initialized to nullptr and then periodically acquire-loaded.
   T v = NoTsanAtomicLoad(a, mo);
-  SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
+  SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
   if (s) {
+    ReadLock l(&s->mtx);
     AcquireImpl(thr, pc, &s->clock);
     // Re-read under sync mutex because we need a consistent snapshot
     // of the value and the clock we acquire.
     v = NoTsanAtomicLoad(a, mo);
-    s->mtx.ReadUnlock();
   }
   MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
   return v;
@@ -257,7 +256,7 @@ static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
 #endif
 template <typename T>
 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
-                        morder mo) NO_THREAD_SAFETY_ANALYSIS {
+                        morder mo) {
   CHECK(IsStoreOrder(mo));
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   // This fast-path is critical for performance.
@@ -269,36 +268,32 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     return;
   }
   __sync_synchronize();
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a);
+  Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
   ReleaseStoreImpl(thr, pc, &s->clock);
   NoTsanAtomicStore(a, v, mo);
-  s->mtx.Unlock();
 }
 
 template <typename T, T (*F)(volatile T *v, T op)>
-static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v,
-                   morder mo) NO_THREAD_SAFETY_ANALYSIS {
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
-  SyncVar *s = 0;
-  if (mo != mo_relaxed) {
-    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
-    thr->fast_state.IncrementEpoch();
-    // Can't increment epoch w/o writing to the trace as well.
-    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-    if (IsAcqRelOrder(mo))
-      AcquireReleaseImpl(thr, pc, &s->clock);
-    else if (IsReleaseOrder(mo))
-      ReleaseImpl(thr, pc, &s->clock);
-    else if (IsAcquireOrder(mo))
-      AcquireImpl(thr, pc, &s->clock);
-  }
-  v = F(a, v);
-  if (s)
-    s->mtx.Unlock();
-  return v;
+  if (LIKELY(mo == mo_relaxed))
+    return F(a, v);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a);
+  Lock l(&s->mtx);
+  thr->fast_state.IncrementEpoch();
+  // Can't increment epoch w/o writing to the trace as well.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+  if (IsAcqRelOrder(mo))
+    AcquireReleaseImpl(thr, pc, &s->clock);
+  else if (IsReleaseOrder(mo))
+    ReleaseImpl(thr, pc, &s->clock);
+  else if (IsAcquireOrder(mo))
+    AcquireImpl(thr, pc, &s->clock);
+  return F(a, v);
 }
 
 template <typename T>
@@ -402,20 +397,26 @@ static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
 }
 
 template <typename T>
-static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo,
-                      morder fmo) NO_THREAD_SAFETY_ANALYSIS {
+static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
+                      morder mo, morder fmo) {
   // 31.7.2.18: "The failure argument shall not be memory_order_release
   // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
   // (mo_relaxed) when those are used.
   CHECK(IsLoadOrder(fmo));
 
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
-  SyncVar *s = 0;
-  bool write_lock = IsReleaseOrder(mo);
-
-  if (mo != mo_relaxed || fmo != mo_relaxed)
-    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
+  if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
+    T cc = *c;
+    T pr = func_cas(a, cc, v);
+    if (pr == cc)
+      return true;
+    *c = pr;
+    return false;
+  }
+  bool release = IsReleaseOrder(mo);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a);
+  RWLock l(&s->mtx, release);
   T cc = *c;
   T pr = func_cas(a, cc, v);
   bool success = pr == cc;
@@ -423,25 +424,16 @@ static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morde
     *c = pr;
     mo = fmo;
   }
+  thr->fast_state.IncrementEpoch();
+  // Can't increment epoch w/o writing to the trace as well.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-  if (s) {
-    thr->fast_state.IncrementEpoch();
-    // Can't increment epoch w/o writing to the trace as well.
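The pattern in AtomicRMW and AtomicCAS is the same: a LIKELY relaxed fast path that never touches the metamap, and a locked slow path that updates the clocks. A minimal standalone model of that split (illustrative names, not the tsan API; the clock stand-ins are deliberately empty):

    #include <atomic>
    #include <mutex>

    std::mutex sync_mtx;  // stands in for SyncVar::mtx

    // Stand-ins for AcquireImpl/ReleaseImpl; the real functions merge
    // vector clocks (see the sketch further down).
    inline void AcquireClock() {}
    inline void ReleaseClock() {}

    int FetchAdd(std::atomic<int> *a, int v, std::memory_order mo) {
      if (mo == std::memory_order_relaxed)  // fast path: no sync object needed
        return a->fetch_add(v, mo);
      std::lock_guard<std::mutex> l(sync_mtx);  // slow path: lock, then sync
      if (mo == std::memory_order_acq_rel || mo == std::memory_order_seq_cst) {
        AcquireClock();
        ReleaseClock();
      } else if (mo == std::memory_order_release) {
        ReleaseClock();
      } else {  // acquire / consume
        AcquireClock();
      }
      return a->fetch_add(v, mo);
    }

Moving the relaxed case in front of the SyncVar lookup is the actual optimization: relaxed RMWs and CASes used to pay for GetOrCreateAndLock even though they never touch the clocks.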
-    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-
-    if (success && IsAcqRelOrder(mo))
-      AcquireReleaseImpl(thr, pc, &s->clock);
-    else if (success && IsReleaseOrder(mo))
-      ReleaseImpl(thr, pc, &s->clock);
-    else if (IsAcquireOrder(mo))
-      AcquireImpl(thr, pc, &s->clock);
-
-    if (write_lock)
-      s->mtx.Unlock();
-    else
-      s->mtx.ReadUnlock();
-  }
-
+  if (success && IsAcqRelOrder(mo))
+    AcquireReleaseImpl(thr, pc, &s->clock);
+  else if (success && IsReleaseOrder(mo))
+    ReleaseImpl(thr, pc, &s->clock);
+  else if (IsAcquireOrder(mo))
+    AcquireImpl(thr, pc, &s->clock);
   return success;
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index b5dec47d1eb220..359b15c22ebdd7 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -63,7 +63,7 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
   OutputReport(thr, rep);
 }
 
-void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
     CHECK(!thr->is_freeing);
@@ -71,41 +71,43 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFE
     MemoryWrite(thr, pc, addr, kSizeLog1);
     thr->is_freeing = false;
   }
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  Lock l(&s->mtx);
   s->SetFlags(flagz & MutexCreationFlagMask);
   if (!SANITIZER_GO && s->creation_stack_id == 0)
     s->creation_stack_id = CurrentStackId(thr, pc);
-  s->mtx.Unlock();
 }
 
-void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
-  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
-  if (s == 0)
-    return;
-  if ((flagz & MutexFlagLinkerInit)
-      || s->IsFlagSet(MutexFlagLinkerInit)
-      || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
-    // Destroy is no-op for linker-initialized mutexes.
-    s->mtx.Unlock();
-    return;
-  }
-  if (common_flags()->detect_deadlocks) {
-    Callback cb(thr, pc);
-    ctx->dd->MutexDestroy(&cb, &s->dd);
-    ctx->dd->MutexInit(&cb, &s->dd);
-  }
   bool unlock_locked = false;
-  if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
-      !s->IsFlagSet(MutexFlagBroken)) {
-    s->SetFlags(MutexFlagBroken);
-    unlock_locked = true;
+  u64 mid = 0;
+  u64 last_lock = 0;
+  {
+    SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
+    if (s == 0)
+      return;
+    Lock l(&s->mtx);
+    if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
+        ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
+      // Destroy is no-op for linker-initialized mutexes.
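MutexDestroy (completed in the next hunk) sets the shape every rewritten entry point follows: gather whatever the report needs into locals inside a block scope, let the scoped lock release at the closing brace, and only then call the reporting machinery, which takes other locks of its own. Schematically, with standard-library types rather than the tsan ones:

    #include <cstdio>
    #include <mutex>

    struct Sync {
      std::mutex mtx;
      unsigned long id = 0;
      unsigned long last_lock = 0;
    };

    // Stands in for the reporting machinery, which acquires further
    // locks (thread registry, symbolizer) of its own.
    void Report(unsigned long id, unsigned long last_lock) {
      std::printf("mutex %lu destroyed while locked (last lock: %lu)\n", id,
                  last_lock);
    }

    void Destroy(Sync *s) {
      unsigned long id, last_lock;
      {
        std::lock_guard<std::mutex> l(s->mtx);
        id = s->id;  // copy out everything the report will need
        last_lock = s->last_lock;
      }                       // s->mtx released at the closing brace
      Report(id, last_lock);  // never runs with s->mtx held
    }

Holding s->mtx across reporting would invert lock order against those other locks, which is exactly what the old manual unlock-before-report code was protecting against by hand.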
+      return;
+    }
+    if (common_flags()->detect_deadlocks) {
+      Callback cb(thr, pc);
+      ctx->dd->MutexDestroy(&cb, &s->dd);
+      ctx->dd->MutexInit(&cb, &s->dd);
+    }
+    if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
+        !s->IsFlagSet(MutexFlagBroken)) {
+      s->SetFlags(MutexFlagBroken);
+      unlock_locked = true;
+    }
+    mid = s->GetId();
+    last_lock = s->last_lock;
+    if (!unlock_locked)
+      s->Reset(thr->proc());  // must not reset it before the report is printed
   }
-  u64 mid = s->GetId();
-  u64 last_lock = s->last_lock;
-  if (!unlock_locked)
-    s->Reset(thr->proc());  // must not reset it before the report is printed
-  s->mtx.Unlock();
   if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
     ThreadRegistryLock l(&ctx->thread_registry);
     ScopedReport rep(ReportTypeMutexDestroyLocked);
@@ -119,10 +121,10 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAF
     rep.AddLocation(addr, 1);
     OutputReport(thr, rep);
 
-    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
+    SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
     if (s != 0) {
+      Lock l(&s->mtx);
       s->Reset(thr->proc());
-      s->mtx.Unlock();
     }
   }
   thr->mset.Remove(mid);
@@ -138,24 +140,24 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAF
   // s will be destroyed and freed in MetaMap::FreeBlock.
 }
 
-void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
-    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
-    s->UpdateFlags(flagz);
-    if (s->owner_tid != thr->tid) {
-      Callback cb(thr, pc);
-      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
-      s->mtx.ReadUnlock();
-      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
-    } else {
-      s->mtx.ReadUnlock();
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    {
+      ReadLock l(&s->mtx);
+      s->UpdateFlags(flagz);
+      if (s->owner_tid != thr->tid) {
+        Callback cb(thr, pc);
+        ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+      }
     }
+    Callback cb(thr, pc);
+    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
   }
 }
 
-void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz,
-                   int rec) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
   DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
       thr->tid, addr, flagz, rec);
   if (flagz & MutexFlagRecursiveLock)
@@ -164,42 +166,45 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz,
     rec = 1;
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
-  s->UpdateFlags(flagz);
-  thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
-  bool report_double_lock = false;
-  if (s->owner_tid == kInvalidTid) {
-    CHECK_EQ(s->recursion, 0);
-    s->owner_tid = thr->tid;
-    s->last_lock = thr->fast_state.raw();
-  } else if (s->owner_tid == thr->tid) {
-    CHECK_GT(s->recursion, 0);
-  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
-    s->SetFlags(MutexFlagBroken);
-    report_double_lock = true;
-  }
-  const bool first = s->recursion == 0;
-  s->recursion += rec;
-  if (first) {
-    AcquireImpl(thr, pc, &s->clock);
-    AcquireImpl(thr, pc, &s->read_clock);
-  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
-  }
-  thr->mset.Add(s->GetId(), true,
-                thr->fast_state.epoch());
+  u64 mid = 0;
   bool pre_lock = false;
-  if (first && common_flags()->detect_deadlocks) {
-    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
-        !(flagz & MutexFlagTryLock);
-    Callback cb(thr, pc);
-    if (pre_lock)
-      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
-    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+  bool first = false;
+  bool report_double_lock = false;
+  {
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    Lock l(&s->mtx);
+    s->UpdateFlags(flagz);
+    thr->fast_state.IncrementEpoch();
+    TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+    if (s->owner_tid == kInvalidTid) {
+      CHECK_EQ(s->recursion, 0);
+      s->owner_tid = thr->tid;
+      s->last_lock = thr->fast_state.raw();
+    } else if (s->owner_tid == thr->tid) {
+      CHECK_GT(s->recursion, 0);
+    } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+      s->SetFlags(MutexFlagBroken);
+      report_double_lock = true;
+    }
+    first = s->recursion == 0;
+    s->recursion += rec;
+    if (first) {
+      AcquireImpl(thr, pc, &s->clock);
+      AcquireImpl(thr, pc, &s->read_clock);
+    } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
+    }
+    thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+    if (first && common_flags()->detect_deadlocks) {
+      pre_lock =
+          (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
+      Callback cb(thr, pc);
+      if (pre_lock)
+        ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+      ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+    }
+    mid = s->GetId();
   }
-  u64 mid = s->GetId();
-  s->mtx.Unlock();
-  // Can't touch s after this point.
-  s = 0;
   if (report_double_lock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
   if (first && pre_lock && common_flags()->detect_deadlocks) {
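Only the first (outermost) acquisition in MutexPostLock acquires the clocks; re-entrant acquisitions of a write-reentrant mutex just bump the recursion count. A minimal model of that accounting (illustrative only, not the tsan types):

    struct MutexState {
      int owner_tid = -1;  // -1 plays the role of kInvalidTid
      int recursion = 0;
    };

    // Returns true if this was the outermost acquisition, i.e. the caller
    // must acquire the mutex's clocks to establish happens-before.
    bool OnPostLock(MutexState *s, int tid, int rec) {
      if (s->owner_tid == -1)
        s->owner_tid = tid;
      bool first = s->recursion == 0;
      s->recursion += rec;
      return first;
    }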
@@ -208,38 +213,40 @@
   }
 }
 
-int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
-  thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
-  int rec = 0;
+  u64 mid = 0;
   bool report_bad_unlock = false;
-  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
-    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
-      s->SetFlags(MutexFlagBroken);
-      report_bad_unlock = true;
-    }
-  } else {
-    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
-    s->recursion -= rec;
-    if (s->recursion == 0) {
-      s->owner_tid = kInvalidTid;
-      ReleaseStoreImpl(thr, pc, &s->clock);
+  int rec = 0;
+  {
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    Lock l(&s->mtx);
+    thr->fast_state.IncrementEpoch();
+    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+    if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+        s->SetFlags(MutexFlagBroken);
+        report_bad_unlock = true;
+      }
     } else {
+      rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
+      s->recursion -= rec;
+      if (s->recursion == 0) {
+        s->owner_tid = kInvalidTid;
+        ReleaseStoreImpl(thr, pc, &s->clock);
+      } else {
+      }
     }
+    thr->mset.Del(s->GetId(), true);
+    if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+        !report_bad_unlock) {
+      Callback cb(thr, pc);
+      ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+    }
+    mid = s->GetId();
   }
-  thr->mset.Del(s->GetId(), true);
-  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
-      !report_bad_unlock) {
-    Callback cb(thr, pc);
-    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
-  }
-  u64 mid = s->GetId();
-  s->mtx.Unlock();
-  // Can't touch s after this point.
   if (report_bad_unlock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
   if (common_flags()->detect_deadlocks && !report_bad_unlock) {
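MutexUnlock is the counterpart of the accounting sketched above: MutexFlagRecursiveUnlock releases every recursion level at once, a plain unlock releases one, and ownership is dropped (and the release clock published) only when the count reaches zero. Continuing the same illustrative model:

    // Companion to OnPostLock above. Returns how many levels were
    // released; a zero recursion count afterwards means ownership was
    // dropped and the release clock must be published.
    int OnUnlock(MutexState *s, bool recursive_unlock) {
      int rec = recursive_unlock ? s->recursion : 1;
      s->recursion -= rec;
      if (s->recursion == 0)
        s->owner_tid = -1;
      return rec;
    }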
@@ -249,49 +256,53 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFET
   return rec;
 }
 
-void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
-    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
-    s->UpdateFlags(flagz);
+    {
+      SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+      ReadLock l(&s->mtx);
+      s->UpdateFlags(flagz);
+      Callback cb(thr, pc);
+      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+    }
     Callback cb(thr, pc);
-    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
-    s->mtx.ReadUnlock();
     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
   }
 }
 
-void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
-  s->UpdateFlags(flagz);
-  thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+  u64 mid = 0;
   bool report_bad_lock = false;
-  if (s->owner_tid != kInvalidTid) {
-    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
-      s->SetFlags(MutexFlagBroken);
-      report_bad_lock = true;
-    }
-  }
-  AcquireImpl(thr, pc, &s->clock);
-  s->last_lock = thr->fast_state.raw();
-  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
   bool pre_lock = false;
-  if (common_flags()->detect_deadlocks) {
-    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
-        !(flagz & MutexFlagTryLock);
-    Callback cb(thr, pc);
-    if (pre_lock)
-      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
-    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+  {
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    ReadLock l(&s->mtx);
+    s->UpdateFlags(flagz);
+    thr->fast_state.IncrementEpoch();
+    TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+    if (s->owner_tid != kInvalidTid) {
+      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+        s->SetFlags(MutexFlagBroken);
+        report_bad_lock = true;
+      }
+    }
+    AcquireImpl(thr, pc, &s->clock);
+    s->last_lock = thr->fast_state.raw();
+    thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+    if (common_flags()->detect_deadlocks) {
+      pre_lock =
+          (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
+      Callback cb(thr, pc);
+      if (pre_lock)
+        ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+      ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+    }
+    mid = s->GetId();
   }
-  u64 mid = s->GetId();
-  s->mtx.ReadUnlock();
-  // Can't touch s after this point.
-  s = 0;
   if (report_bad_lock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
   if (pre_lock && common_flags()->detect_deadlocks) {
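Note that MutexPostReadLock mutates SyncVar state (flags, last_lock) while holding only a read lock on s->mtx, so several readers can race on those fields; that is only sound if such fields are updated atomically. A sketch of the discipline, assuming an atomic flag word (illustrative, not the actual SyncVar layout):

    #include <atomic>

    struct SyncFlags {
      std::atomic<unsigned> flags{0};

      // fetch_or is safe under a shared lock: concurrent setters cannot
      // lose each other's bits, unlike a plain load-modify-store.
      void SetFlags(unsigned f) {
        flags.fetch_or(f, std::memory_order_relaxed);
      }

      bool IsFlagSet(unsigned f) const {
        return flags.load(std::memory_order_relaxed) & f;
      }
    };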
@@ -300,28 +311,30 @@ void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREA
   }
 }
 
-void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
-  thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+  u64 mid = 0;
   bool report_bad_unlock = false;
-  if (s->owner_tid != kInvalidTid) {
-    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
-      s->SetFlags(MutexFlagBroken);
-      report_bad_unlock = true;
+  {
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    Lock l(&s->mtx);
+    thr->fast_state.IncrementEpoch();
+    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+    if (s->owner_tid != kInvalidTid) {
+      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+        s->SetFlags(MutexFlagBroken);
+        report_bad_unlock = true;
+      }
    }
+    ReleaseImpl(thr, pc, &s->read_clock);
+    if (common_flags()->detect_deadlocks && s->recursion == 0) {
+      Callback cb(thr, pc);
+      ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+    }
+    mid = s->GetId();
   }
-  ReleaseImpl(thr, pc, &s->read_clock);
-  if (common_flags()->detect_deadlocks && s->recursion == 0) {
-    Callback cb(thr, pc);
-    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
-  }
-  u64 mid = s->GetId();
-  s->mtx.Unlock();
-  // Can't touch s after this point.
   thr->mset.Del(mid, false);
   if (report_bad_unlock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
@@ -331,42 +344,44 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANAL
   }
 }
 
-void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
-  bool write = true;
+  u64 mid = 0;
   bool report_bad_unlock = false;
-  if (s->owner_tid == kInvalidTid) {
-    // Seems to be read unlock.
-    write = false;
-    thr->fast_state.IncrementEpoch();
-    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
-    ReleaseImpl(thr, pc, &s->read_clock);
-  } else if (s->owner_tid == thr->tid) {
-    // Seems to be write unlock.
-    thr->fast_state.IncrementEpoch();
-    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
-    CHECK_GT(s->recursion, 0);
-    s->recursion--;
-    if (s->recursion == 0) {
-      s->owner_tid = kInvalidTid;
-      ReleaseStoreImpl(thr, pc, &s->clock);
-    } else {
+  {
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    Lock l(&s->mtx);
+    bool write = true;
+    if (s->owner_tid == kInvalidTid) {
+      // Seems to be read unlock.
+      write = false;
+      thr->fast_state.IncrementEpoch();
+      TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+      ReleaseImpl(thr, pc, &s->read_clock);
+    } else if (s->owner_tid == thr->tid) {
+      // Seems to be write unlock.
+      thr->fast_state.IncrementEpoch();
+      TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+      CHECK_GT(s->recursion, 0);
+      s->recursion--;
+      if (s->recursion == 0) {
+        s->owner_tid = kInvalidTid;
+        ReleaseStoreImpl(thr, pc, &s->clock);
+      } else {
+      }
+    } else if (!s->IsFlagSet(MutexFlagBroken)) {
+      s->SetFlags(MutexFlagBroken);
+      report_bad_unlock = true;
     }
-  } else if (!s->IsFlagSet(MutexFlagBroken)) {
-    s->SetFlags(MutexFlagBroken);
-    report_bad_unlock = true;
-  }
-  thr->mset.Del(s->GetId(), write);
-  if (common_flags()->detect_deadlocks && s->recursion == 0) {
-    Callback cb(thr, pc);
-    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+    thr->mset.Del(s->GetId(), write);
+    if (common_flags()->detect_deadlocks && s->recursion == 0) {
+      Callback cb(thr, pc);
+      ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+    }
+    mid = s->GetId();
   }
-  u64 mid = s->GetId();
-  s->mtx.Unlock();
-  // Can't touch s after this point.
   if (report_bad_unlock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
   if (common_flags()->detect_deadlocks) {
@@ -375,31 +390,29 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFE
   }
 }
 
-void MutexRepair(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  Lock l(&s->mtx);
   s->owner_tid = kInvalidTid;
   s->recursion = 0;
-  s->mtx.Unlock();
 }
 
-void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
-  u64 mid = s->GetId();
-  s->mtx.Unlock();
-  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, s->GetId());
 }
 
-void Acquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void Acquire(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
+  SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
   if (!s)
     return;
+  ReadLock l(&s->mtx);
   AcquireImpl(thr, pc, &s->clock);
-  s->mtx.ReadUnlock();
 }
 
 static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
@@ -421,40 +434,40 @@ void AcquireGlobal(ThreadState *thr) {
   ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateClockCallback,
                                                       thr);
 }
 
-void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
   ReleaseStoreAcquireImpl(thr, pc, &s->clock);
-  s->mtx.Unlock();
 }
 
-void Release(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void Release(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: Release %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
    return;
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
   ReleaseImpl(thr, pc, &s->clock);
-  s->mtx.Unlock();
 }
 
-void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
   ReleaseStoreImpl(thr, pc, &s->clock);
-  s->mtx.Unlock();
 }
 
 #if !SANITIZER_GO
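All of the Acquire*/Release* entry points above reduce to vector-clock merges: release folds the releasing thread's clock into the sync object, acquire folds the sync object's clock into the acquiring thread. A minimal model of the semantics (tsan's real clocks are a heavily optimized representation, not vectors of integers):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    using VectorClock = std::vector<unsigned long>;  // one slot per thread

    // ReleaseImpl, in essence: the sync clock becomes the element-wise max.
    void Release(const VectorClock &thread, VectorClock *sync) {
      if (sync->size() < thread.size())
        sync->resize(thread.size(), 0);
      for (size_t i = 0; i < thread.size(); i++)
        (*sync)[i] = std::max((*sync)[i], thread[i]);
    }

    // AcquireImpl, in essence: the thread clock absorbs the sync clock.
    void Acquire(VectorClock *thread, const VectorClock &sync) {
      if (thread->size() < sync.size())
        thread->resize(sync.size(), 0);
      for (size_t i = 0; i < sync.size(); i++)
        (*thread)[i] = std::max((*thread)[i], sync[i]);
    }

This is why read-only operations (Acquire, atomic loads) can get away with a ReadLock on s->mtx: they only read the sync clock, while Release* variants mutate it and need the write lock.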
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index 1948ed0fe41027..6dbfce3ee7b9f1 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -282,22 +282,21 @@ void ScopedReportBase::AddMutex(const SyncVar *s) {
   rm->stack = SymbolizeStackId(s->creation_stack_id);
 }
 
-u64 ScopedReportBase::AddMutex(u64 id) NO_THREAD_SAFETY_ANALYSIS {
+u64 ScopedReportBase::AddMutex(u64 id) {
   u64 uid = 0;
   u64 mid = id;
   uptr addr = SyncVar::SplitId(id, &uid);
-  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
+  SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
   // Check that the mutex is still alive.
   // Another mutex can be created at the same address,
   // so check uid as well.
   if (s && s->CheckId(uid)) {
+    Lock l(&s->mtx);
     mid = s->uid;
     AddMutex(s);
   } else {
     AddDeadMutex(id);
   }
-  if (s)
-    s->mtx.Unlock();
   return mid;
 }
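The uid re-check in AddMutex guards against address reuse: a destroyed mutex's address can be recycled, so the report id packs a generation counter next to the address, and a stale id must not be matched against a newer object. A hypothetical packing for a 48-bit address space (tsan's SplitId has its own layout; this only illustrates the idea):

    #include <cstdint>

    constexpr int kAddrBits = 48;
    constexpr uint64_t kAddrMask = (1ull << kAddrBits) - 1;

    uint64_t MakeId(uint64_t addr, uint64_t uid) {
      return (addr & kAddrMask) | (uid << kAddrBits);
    }

    // Returns the address; *uid receives the generation the id was minted
    // with, to be compared against the live object's current uid.
    uint64_t SplitId(uint64_t id, uint64_t *uid) {
      *uid = id >> kAddrBits;
      return id & kAddrMask;
    }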
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
index 5e226b2d12b172..dc1081a9b8b13a 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
@@ -190,17 +190,7 @@ MBlock* MetaMap::GetBlock(uptr p) {
   }
 }
 
-SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
-                                     uptr addr, bool write_lock) {
-  return GetAndLock(thr, pc, addr, write_lock, true);
-}
-
-SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
-  return GetAndLock(0, 0, addr, write_lock, false);
-}
-
-SyncVar *MetaMap::GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
-                             bool create) NO_THREAD_SAFETY_ANALYSIS {
+SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create) {
   u32 *meta = MemToMeta(addr);
   u32 idx0 = *meta;
   u32 myidx = 0;
@@ -219,16 +209,12 @@ SyncVar *MetaMap::GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lo
         mys->Reset(thr->proc());
         sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
       }
-      if (write_lock)
-        s->mtx.Lock();
-      else
-        s->mtx.ReadLock();
       return s;
     }
     idx = s->next;
   }
   if (!create)
-    return 0;
+    return nullptr;
   if (*meta != idx0) {
     idx0 = *meta;
     continue;
@@ -243,10 +229,6 @@ SyncVar *MetaMap::GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lo
     mys->next = idx0;
     if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
         myidx | kFlagSync, memory_order_release)) {
-      if (write_lock)
-        mys->mtx.Lock();
-      else
-        mys->mtx.ReadLock();
       return mys;
     }
   }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.h b/compiler-rt/lib/tsan/rtl/tsan_sync.h
index 33dcc16c258cc8..73556dd71246d8 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.h
@@ -115,9 +115,12 @@ class MetaMap {
   void ResetRange(Processor *proc, uptr p, uptr sz);
   MBlock* GetBlock(uptr p);
 
-  SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
-                              uptr addr, bool write_lock);
-  SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
+  SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr) {
+    return GetSync(thr, pc, addr, true);
+  }
+  SyncVar *GetSyncIfExists(uptr addr) {
+    return GetSync(nullptr, 0, addr, false);
+  }
 
   void MoveMemory(uptr src, uptr dst, uptr sz);
 
@@ -133,8 +136,7 @@ class MetaMap {
   SyncAlloc sync_alloc_;
   atomic_uint64_t uid_gen_;
 
-  SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
-                      bool create);
+  SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create);
 };
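GetSync no longer locks the returned SyncVar, but creation remains lock-free: a freshly initialized SyncVar is linked to the current list head and published with a release CAS on the meta slot; a loser re-reads the slot, retries the lookup, and recycles its allocation (the mys->Reset/sync_alloc_.Free path above). The publish step in isolation, modeled with std::atomic (illustrative only):

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kFlagSync = 1u << 30;  // placeholder tag bit

    // Attempt to install a freshly initialized node (my_idx) as the new
    // head of the per-address sync list. 'head' is the head index observed
    // when the node was linked (node->next = head). On failure the caller
    // re-reads *meta and retries, possibly finding a SyncVar that another
    // thread created for this address in the meantime.
    bool PublishSync(std::atomic<uint32_t> *meta, uint32_t head,
                     uint32_t my_idx) {
      return meta->compare_exchange_strong(head, my_idx | kFlagSync,
                                           std::memory_order_release);
    }

The release order on the successful CAS is what makes the node's initialization visible to any thread that subsequently finds it through the meta slot. The updated unit tests below exercise exactly this create/lookup/free lifecycle through the new API.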
diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp
index 806592e755765a..324f4f61200fa6 100644
--- a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp
+++ b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp
@@ -47,7 +47,7 @@ TEST(MetaMap, FreeRange) {
   EXPECT_EQ(mb2, (MBlock*)0);
 }
 
-TEST(MetaMap, Sync) NO_THREAD_SAFETY_ANALYSIS {
+TEST(MetaMap, Sync) {
   // EXPECT can call memset/etc. Disable interceptors to prevent
   // them from detecting that we exit runtime with mutexes held.
   ScopedIgnoreInterceptors ignore;
@@ -55,25 +55,23 @@
   MetaMap *m = &ctx->metamap;
   u64 block[4] = {};  // fake malloc block
   m->AllocBlock(thr, 0, (uptr)&block[0], 4 * sizeof(u64));
-  SyncVar *s1 = m->GetIfExistsAndLock((uptr)&block[0], true);
+  SyncVar *s1 = m->GetSyncIfExists((uptr)&block[0]);
   EXPECT_EQ(s1, (SyncVar*)0);
-  s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[0], true);
+  s1 = m->GetSyncOrCreate(thr, 0, (uptr)&block[0]);
   EXPECT_NE(s1, (SyncVar*)0);
   EXPECT_EQ(s1->addr, (uptr)&block[0]);
-  s1->mtx.Unlock();
-  SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[1], false);
+  SyncVar *s2 = m->GetSyncOrCreate(thr, 0, (uptr)&block[1]);
   EXPECT_NE(s2, (SyncVar*)0);
   EXPECT_EQ(s2->addr, (uptr)&block[1]);
-  s2->mtx.ReadUnlock();
   m->FreeBlock(thr->proc(), (uptr)&block[0]);
-  s1 = m->GetIfExistsAndLock((uptr)&block[0], true);
+  s1 = m->GetSyncIfExists((uptr)&block[0]);
   EXPECT_EQ(s1, (SyncVar*)0);
-  s2 = m->GetIfExistsAndLock((uptr)&block[1], true);
+  s2 = m->GetSyncIfExists((uptr)&block[1]);
   EXPECT_EQ(s2, (SyncVar*)0);
   m->OnProcIdle(thr->proc());
 }
 
-TEST(MetaMap, MoveMemory) NO_THREAD_SAFETY_ANALYSIS {
+TEST(MetaMap, MoveMemory) {
   ScopedIgnoreInterceptors ignore;
   ThreadState *thr = cur_thread();
   MetaMap *m = &ctx->metamap;
@@ -81,10 +79,8 @@ TEST(MetaMap, MoveMemory) NO_THREAD_SAFETY_ANALYSIS {
   u64 block2[4] = {};  // fake malloc block
   m->AllocBlock(thr, 0, (uptr)&block1[0], 3 * sizeof(u64));
   m->AllocBlock(thr, 0, (uptr)&block1[3], 1 * sizeof(u64));
-  SyncVar *s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[0], true);
-  s1->mtx.Unlock();
-  SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[1], true);
-  s2->mtx.Unlock();
+  SyncVar *s1 = m->GetSyncOrCreate(thr, 0, (uptr)&block1[0]);
+  SyncVar *s2 = m->GetSyncOrCreate(thr, 0, (uptr)&block1[1]);
   m->MoveMemory((uptr)&block1[0], (uptr)&block2[0], 4 * sizeof(u64));
   MBlock *mb1 = m->GetBlock((uptr)&block1[0]);
   EXPECT_EQ(mb1, (MBlock*)0);
@@ -96,30 +92,27 @@ TEST(MetaMap, MoveMemory) NO_THREAD_SAFETY_ANALYSIS {
   mb2 = m->GetBlock((uptr)&block2[3]);
   EXPECT_NE(mb2, (MBlock*)0);
   EXPECT_EQ(mb2->siz, 1 * sizeof(u64));
-  s1 = m->GetIfExistsAndLock((uptr)&block1[0], true);
+  s1 = m->GetSyncIfExists((uptr)&block1[0]);
   EXPECT_EQ(s1, (SyncVar*)0);
-  s2 = m->GetIfExistsAndLock((uptr)&block1[1], true);
+  s2 = m->GetSyncIfExists((uptr)&block1[1]);
   EXPECT_EQ(s2, (SyncVar*)0);
-  s1 = m->GetIfExistsAndLock((uptr)&block2[0], true);
+  s1 = m->GetSyncIfExists((uptr)&block2[0]);
   EXPECT_NE(s1, (SyncVar*)0);
   EXPECT_EQ(s1->addr, (uptr)&block2[0]);
-  s1->mtx.Unlock();
-  s2 = m->GetIfExistsAndLock((uptr)&block2[1], true);
+  s2 = m->GetSyncIfExists((uptr)&block2[1]);
   EXPECT_NE(s2, (SyncVar*)0);
   EXPECT_EQ(s2->addr, (uptr)&block2[1]);
-  s2->mtx.Unlock();
   m->FreeRange(thr->proc(), (uptr)&block2[0], 4 * sizeof(u64));
 }
 
-TEST(MetaMap, ResetSync) NO_THREAD_SAFETY_ANALYSIS {
+TEST(MetaMap, ResetSync) {
   ScopedIgnoreInterceptors ignore;
   ThreadState *thr = cur_thread();
   MetaMap *m = &ctx->metamap;
   u64 block[1] = {};  // fake malloc block
   m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
-  SyncVar *s = m->GetOrCreateAndLock(thr, 0, (uptr)&block[0], true);
+  SyncVar *s = m->GetSyncOrCreate(thr, 0, (uptr)&block[0]);
   s->Reset(thr->proc());
-  s->mtx.Unlock();
   uptr sz = m->FreeBlock(thr->proc(), (uptr)&block[0]);
   EXPECT_EQ(sz, 1 * sizeof(u64));
 }