diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 14b25a8995dab..34f00006665eb 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -22,6 +22,7 @@
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_platform_interceptors.h"
 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
@@ -2141,13 +2142,29 @@ static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
   // StackTrace::GetNestInstructionPc(pc) is used because return address is
   // expected, OutputReport() will undo this.
   ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeErrnoInSignal);
-  rep.SetSigNum(sig);
-  if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
-    rep.AddStack(stack, true);
-    OutputReport(thr, rep);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  bool suppressed;
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (rep) ScopedReport(ReportTypeErrnoInSignal);
+    rep->SetSigNum(sig);
+    suppressed = IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack);
+    if (!suppressed)
+      rep->AddStack(stack, true);
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks before writing report
+#endif
+  if (!suppressed)
+    OutputReport(thr, *rep);
+
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
+#if !SANITIZER_APPLE
   }
+#endif
 }
 
 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
index befd6a369026d..02ca82369aae1 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
@@ -437,16 +437,30 @@ void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
 }
 
 static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeMutexHeldWrongContext);
-  for (uptr i = 0; i < thr->mset.Size(); ++i) {
-    MutexSet::Desc desc = thr->mset.Get(i);
-    rep.AddMutex(desc.addr, desc.stack_id);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (rep) ScopedReport(ReportTypeMutexHeldWrongContext);
+    for (uptr i = 0; i < thr->mset.Size(); ++i) {
+      MutexSet::Desc desc = thr->mset.Get(i);
+      rep->AddMutex(desc.addr, desc.stack_id);
+    }
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep->AddStack(trace, true);
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+#endif
+  OutputReport(thr, *rep);
+
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
+#if !SANITIZER_APPLE
   }
-  VarSizeStackTrace trace;
-  ObtainCurrentStack(thr, pc, &trace);
-  rep.AddStack(trace, true);
-  OutputReport(thr, rep);
+#endif
 }
 
 INTERFACE_ATTRIBUTE
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 0ea83fb3b5982..caacb3675825f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -182,10 +182,24 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
   ObtainCurrentStack(thr, pc, &stack);
   if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeSignalUnsafe);
-  rep.AddStack(stack, true);
-  OutputReport(thr, rep);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (rep) ScopedReport(ReportTypeSignalUnsafe);
+    rep->AddStack(stack, true);
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+#endif
+  OutputReport(thr, *rep);
+
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
+#if !SANITIZER_APPLE
+  }
+#endif
 }
 
diff --git a/compiler-rt/lib/tsan/rtl/tsan_report.h b/compiler-rt/lib/tsan/rtl/tsan_report.h
index bfe470797f8f7..24d47a5d700cd 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_report.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_report.h
@@ -12,6 +12,8 @@
 #ifndef TSAN_REPORT_H
 #define TSAN_REPORT_H
 
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"
 #include "sanitizer_common/sanitizer_thread_registry.h"
 #include "sanitizer_common/sanitizer_vector.h"
@@ -56,6 +58,7 @@ struct ReportMop {
   bool atomic;
   uptr external_tag;
   Vector<ReportMopMutex> mset;
+  StackTrace stack_trace;
   ReportStack *stack;
 
   ReportMop();
@@ -79,6 +82,7 @@ struct ReportLocation {
   int fd = 0;
   bool fd_closed = false;
   bool suppressable = false;
+  StackID stack_id = 0;
   ReportStack *stack = nullptr;
 };
 
@@ -89,15 +93,23 @@ struct ReportThread {
   ThreadType thread_type;
   char *name;
   Tid parent_tid;
+  StackID stack_id;
   ReportStack *stack;
+  bool suppressable;
 };
 
 struct ReportMutex {
   int id;
   uptr addr;
+  StackID stack_id;
   ReportStack *stack;
 };
 
+struct AddedLocationAddr {
+  uptr addr;
+  usize locs_idx;
+};
+
 class ReportDesc {
  public:
   ReportType typ;
@@ -105,6 +117,7 @@ class ReportDesc {
   Vector<ReportStack *> stacks;
   Vector<ReportMop *> mops;
   Vector<ReportLocation *> locs;
+  Vector<AddedLocationAddr> added_location_addrs;
   Vector<ReportMutex *> mutexes;
   Vector<ReportThread *> threads;
   Vector<Tid> unique_tids;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index dc32980e905f2..d21c92ece2320 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -420,6 +420,7 @@ class ScopedReportBase {
   void AddSleep(StackID stack_id);
   void SetCount(int count);
   void SetSigNum(int sig);
+  void SymbolizeStackElems(void);
 
   const ReportDesc *GetReport() const;
 
@@ -498,7 +499,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
 
 void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                 AccessType typ);
-bool OutputReport(ThreadState *thr, const ScopedReport &srep);
+bool OutputReport(ThreadState *thr, ScopedReport &srep);
 bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
 bool IsExpectedReport(uptr addr, uptr size);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
index 487fa490636eb..02719beac9f07 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -419,6 +419,11 @@ NOINLINE void TraceRestartMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
 
 ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr,
                                      uptr size, AccessType typ) {
+#if SANITIZER_APPLE && !SANITIZER_GO
+  // Swift symbolizer can be intercepted and deadlock without this
+  if (thr->in_symbolizer)
+    return;
+#endif
   RawShadow* shadow_mem = MemToShadow(addr);
   UNUSED char memBuf[4][64];
   DPrintf2("#%d: Access: %d@%d %p/%zd typ=0x%x {%s, %s, %s, %s}\n", thr->tid,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 2a8aa1915c9ae..30f5e964939d0 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -11,14 +11,15 @@
 //===----------------------------------------------------------------------===//
 
 #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_placement_new.h>
 #include <sanitizer_common/sanitizer_stackdepot.h>
 
-#include "tsan_rtl.h"
 #include "tsan_flags.h"
-#include "tsan_sync.h"
+#include "tsan_platform.h"
 #include "tsan_report.h"
+#include "tsan_rtl.h"
 #include "tsan_symbolize.h"
-#include "tsan_platform.h"
+#include "tsan_sync.h"
 
 namespace __tsan {
 
@@ -55,14 +56,28 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
     return;
   if (!ShouldReport(thr, typ))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(typ);
-  rep.AddMutex(addr, creation_stack_id);
-  VarSizeStackTrace trace;
-  ObtainCurrentStack(thr, pc, &trace);
-  rep.AddStack(trace, true);
-  rep.AddLocation(addr, 1);
-  OutputReport(thr, rep);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (rep) ScopedReport(typ);
+    rep->AddMutex(addr, creation_stack_id);
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep->AddStack(trace, true);
+    rep->AddLocation(addr, 1);
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+#endif
+  OutputReport(thr, *rep);
+
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
+#if !SANITIZER_APPLE
+  }
+#endif
 }
 
 static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
@@ -528,51 +543,81 @@ void AfterSleep(ThreadState *thr, uptr pc) {
 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
   if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeDeadlock);
-  for (int i = 0; i < r->n; i++) {
-    rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
-    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
-    rep.AddThread((int)r->loop[i].thr_ctx);
-  }
-  uptr dummy_pc = 0x42;
-  for (int i = 0; i < r->n; i++) {
-    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
-      u32 stk = r->loop[i].stk[j];
-      if (stk && stk != kInvalidStackID) {
-        rep.AddStack(StackDepotGet(stk), true);
-      } else {
-        // Sometimes we fail to extract the stack trace (FIXME: investigate),
-        // but we should still produce some stack trace in the report.
-        rep.AddStack(StackTrace(&dummy_pc, 1), true);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (rep) ScopedReport(ReportTypeDeadlock);
+    for (int i = 0; i < r->n; i++) {
+      rep->AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
+      rep->AddUniqueTid((int)r->loop[i].thr_ctx);
+      rep->AddThread((int)r->loop[i].thr_ctx);
+    }
+    uptr dummy_pc = 0x42;
+    for (int i = 0; i < r->n; i++) {
+      for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+        u32 stk = r->loop[i].stk[j];
+        StackTrace stack;
+        if (stk && stk != kInvalidStackID) {
+          stack = StackDepotGet(stk);
+        } else {
+          // Sometimes we fail to extract the stack trace (FIXME: investigate),
+          // but we should still produce some stack trace in the report.
+          stack = StackTrace(&dummy_pc, 1);
+        }
+        rep->AddStack(stack, true);
       }
     }
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+#endif
+  OutputReport(thr, *rep);
+
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
+#if !SANITIZER_APPLE
   }
-  OutputReport(thr, rep);
+#endif
 }
 
 void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                          FastState last_lock, StackID creation_stack_id) {
-  // We need to lock the slot during RestoreStack because it protects
-  // the slot journal.
-  Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
-  ThreadRegistryLock l0(&ctx->thread_registry);
-  Lock slots_lock(&ctx->slot_mtx);
-  ScopedReport rep(ReportTypeMutexDestroyLocked);
-  rep.AddMutex(addr, creation_stack_id);
-  VarSizeStackTrace trace;
-  ObtainCurrentStack(thr, pc, &trace);
-  rep.AddStack(trace, true);
-
-  Tid tid;
-  DynamicMutexSet mset;
-  uptr tag;
-  if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
-                    0, kAccessWrite, &tid, &trace, mset, &tag))
-    return;
-  rep.AddStack(trace, true);
-  rep.AddLocation(addr, 1);
-  OutputReport(thr, rep);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    // We need to lock the slot during RestoreStack because it protects
+    // the slot journal.
+    Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
+    ThreadRegistryLock l0(&ctx->thread_registry);
+    Lock slots_lock(&ctx->slot_mtx);
+    new (rep) ScopedReport(ReportTypeMutexDestroyLocked);
+    rep->AddMutex(addr, creation_stack_id);
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep->AddStack(trace, true);
+
+    Tid tid;
+    DynamicMutexSet mset;
+    uptr tag;
+    if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(),
+                      addr, 0, kAccessWrite, &tid, &trace, mset, &tag))
+      return;
+    rep->AddStack(trace, true);
+    rep->AddLocation(addr, 1);
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+#endif
+  OutputReport(thr, *rep);
+
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
+#if !SANITIZER_APPLE
+  }
+#endif
 }
 
 }  // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index 0820bf1adee43..43aef30d2f3b3 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -11,10 +11,12 @@
 //===----------------------------------------------------------------------===//
 
 #include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
 #include "tsan_fd.h"
 #include "tsan_flags.h"
 #include "tsan_mman.h"
@@ -187,10 +189,8 @@ void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
   mop->size = size;
   mop->write = !(typ & kAccessRead);
   mop->atomic = typ & kAccessAtomic;
-  mop->stack = SymbolizeStack(stack);
   mop->external_tag = external_tag;
-  if (mop->stack)
-    mop->stack->suppressable = true;
+  mop->stack_trace = stack;
   for (uptr i = 0; i < mset->Size(); i++) {
     MutexSet::Desc d = mset->Get(i);
     int id = this->AddMutex(d.addr, d.stack_id);
@@ -199,6 +199,56 @@ void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
   }
 }
 
+void ScopedReportBase::SymbolizeStackElems() {
+  // symbolize memory ops
+  for (usize i = 0, size = rep_->mops.Size(); i < size; i++) {
+    ReportMop *mop = rep_->mops[i];
+    mop->stack = SymbolizeStack(mop->stack_trace);
+    if (mop->stack)
+      mop->stack->suppressable = true;
+  }
+
+  // symbolize locations
+  for (usize i = 0, size = rep_->locs.Size(); i < size; i++) {
+    // added locations have a NULL placeholder - don't dereference them
+    if (ReportLocation *loc = rep_->locs[i])
+      loc->stack = SymbolizeStackId(loc->stack_id);
+  }
+
+  // symbolize any added locations
+  for (usize i = 0, size = rep_->added_location_addrs.Size(); i < size; i++) {
+    AddedLocationAddr *added_loc = &rep_->added_location_addrs[i];
+    if (ReportLocation *loc = SymbolizeData(added_loc->addr)) {
+      loc->suppressable = true;
+      rep_->locs[added_loc->locs_idx] = loc;
+    }
+  }
+
+  // Filter out any added location placeholders that could not be symbolized
+  usize j = 0;
+  for (usize i = 0, size = rep_->locs.Size(); i < size; i++) {
+    if (rep_->locs[i] != nullptr) {
+      rep_->locs[j] = rep_->locs[i];
+      j++;
+    }
+  }
+  rep_->locs.Resize(j);
+
+  // symbolize threads
+  for (usize i = 0, size = rep_->threads.Size(); i < size; i++) {
+    ReportThread *rt = rep_->threads[i];
+    rt->stack = SymbolizeStackId(rt->stack_id);
+    if (rt->stack)
+      rt->stack->suppressable = rt->suppressable;
+  }
+
+  // symbolize mutexes
+  for (usize i = 0, size = rep_->mutexes.Size(); i < size; i++) {
+    ReportMutex *rm = rep_->mutexes[i];
+    rm->stack = SymbolizeStackId(rm->stack_id);
+  }
+}
+
 void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
   rep_->unique_tids.PushBack(unique_tid);
 }
@@ -216,10 +266,8 @@ void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
   rt->name = internal_strdup(tctx->name);
   rt->parent_tid = tctx->parent_tid;
   rt->thread_type = tctx->thread_type;
-  rt->stack = 0;
-  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
-  if (rt->stack)
-    rt->stack->suppressable = suppressable;
+  rt->stack_id = tctx->creation_stack_id;
+  rt->suppressable = suppressable;
 }
 
 #if !SANITIZER_GO
@@ -270,7 +318,7 @@ int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
   rep_->mutexes.PushBack(rm);
   rm->id = rep_->mutexes.Size() - 1;
   rm->addr = addr;
-  rm->stack = SymbolizeStackId(creation_stack_id);
+  rm->stack_id = creation_stack_id;
   return rm->id;
 }
 
@@ -288,7 +336,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
     loc->fd_closed = closed;
     loc->fd = fd;
     loc->tid = creat_tid;
-    loc->stack = SymbolizeStackId(creat_stack);
+    loc->stack_id = creat_stack;
     rep_->locs.PushBack(loc);
     AddThread(creat_tid);
     return;
@@ -310,7 +358,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
     loc->heap_chunk_size = b->siz;
     loc->external_tag = b->tag;
     loc->tid = b->tid;
-    loc->stack = SymbolizeStackId(b->stk);
+    loc->stack_id = b->stk;
     rep_->locs.PushBack(loc);
     AddThread(b->tid);
     return;
@@ -324,11 +372,8 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
     AddThread(tctx);
   }
 #endif
-  if (ReportLocation *loc = SymbolizeData(addr)) {
-    loc->suppressable = true;
-    rep_->locs.PushBack(loc);
-    return;
-  }
+  rep_->added_location_addrs.PushBack({addr, rep_->locs.Size()});
+  rep_->locs.PushBack(nullptr);
 }
 
 #if !SANITIZER_GO
@@ -628,11 +673,12 @@ static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
   return false;
 }
 
-bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
+bool OutputReport(ThreadState *thr, ScopedReport &srep) {
   // These should have been checked in ShouldReport.
   // It's too late to check them here, we have already taken locks.
   CHECK(flags()->report_bugs);
   CHECK(!thr->suppress_reports);
+  srep.SymbolizeStackElems();
   atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
   const ReportDesc *rep = srep.GetReport();
   CHECK_EQ(thr->current_report, nullptr);
@@ -761,65 +807,80 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
   DynamicMutexSet mset1;
   MutexSet *mset[kMop] = {&thr->mset, mset1};
 
-  // We need to lock the slot during RestoreStack because it protects
-  // the slot journal.
-  Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
-  ThreadRegistryLock l0(&ctx->thread_registry);
-  Lock slots_lock(&ctx->slot_mtx);
-  if (SpuriousRace(old))
-    return;
-  if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
-                    size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
-    StoreShadow(&ctx->last_spurious_race, old.raw());
-    return;
-  }
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    // We need to lock the slot during RestoreStack because it protects
+    // the slot journal.
+    Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
+    ThreadRegistryLock l0(&ctx->thread_registry);
+    Lock slots_lock(&ctx->slot_mtx);
+    if (SpuriousRace(old))
+      return;
+    if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
+                      size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
+      StoreShadow(&ctx->last_spurious_race, old.raw());
+      return;
+    }
 
-  if (IsFiredSuppression(ctx, rep_typ, traces[1]))
-    return;
+    if (IsFiredSuppression(ctx, rep_typ, traces[1]))
+      return;
 
-  if (HandleRacyStacks(thr, traces))
-    return;
+    if (HandleRacyStacks(thr, traces))
+      return;
 
-  // If any of the accesses has a tag, treat this as an "external" race.
-  uptr tag = kExternalTagNone;
-  for (uptr i = 0; i < kMop; i++) {
-    if (tags[i] != kExternalTagNone) {
-      rep_typ = ReportTypeExternalRace;
-      tag = tags[i];
-      break;
+    // If any of the accesses has a tag, treat this as an "external" race.
+    uptr tag = kExternalTagNone;
+    for (uptr i = 0; i < kMop; i++) {
+      if (tags[i] != kExternalTagNone) {
+        rep_typ = ReportTypeExternalRace;
+        tag = tags[i];
+        break;
+      }
     }
-  }
 
-  ScopedReport rep(rep_typ, tag);
-  for (uptr i = 0; i < kMop; i++)
-    rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
+    new (rep) ScopedReport(rep_typ, tag);
+    for (uptr i = 0; i < kMop; i++)
+      rep->AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
 
-  for (uptr i = 0; i < kMop; i++) {
-    ThreadContext *tctx = static_cast<ThreadContext *>(
-        ctx->thread_registry.GetThreadLocked(tids[i]));
-    rep.AddThread(tctx);
-  }
+    for (uptr i = 0; i < kMop; i++) {
+      ThreadContext *tctx = static_cast<ThreadContext *>(
+          ctx->thread_registry.GetThreadLocked(tids[i]));
+      rep->AddThread(tctx);
+    }
 
-  rep.AddLocation(addr_min, addr_max - addr_min);
-
-  if (flags()->print_full_thread_history) {
-    const ReportDesc *rep_desc = rep.GetReport();
-    for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
-      Tid parent_tid = rep_desc->threads[i]->parent_tid;
-      if (parent_tid == kMainTid || parent_tid == kInvalidTid)
-        continue;
-      ThreadContext *parent_tctx = static_cast<ThreadContext *>(
-          ctx->thread_registry.GetThreadLocked(parent_tid));
-      rep.AddThread(parent_tctx);
+    rep->AddLocation(addr_min, addr_max - addr_min);
+
+    if (flags()->print_full_thread_history) {
+      const ReportDesc *rep_desc = rep->GetReport();
+      for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
+        Tid parent_tid = rep_desc->threads[i]->parent_tid;
+        if (parent_tid == kMainTid || parent_tid == kInvalidTid)
+          continue;
+        ThreadContext *parent_tctx = static_cast<ThreadContext *>(
+            ctx->thread_registry.GetThreadLocked(parent_tid));
+        rep->AddThread(parent_tctx);
+      }
     }
-  }
 #if !SANITIZER_GO
-  if (!((typ0 | typ1) & kAccessFree) &&
-      s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
-    rep.AddSleep(thr->last_sleep_stack_id);
+    if (!((typ0 | typ1) & kAccessFree) &&
+        s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
+      rep->AddSleep(thr->last_sleep_stack_id);
+#endif
+
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+#endif
+  OutputReport(thr, *rep);
+
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
+#if !SANITIZER_APPLE
+  }
 #endif
-  OutputReport(thr, rep);
 }
 
 void PrintCurrentStack(ThreadState *thr, uptr pc) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
index 766e88658a8a8..ad681ffb44e3f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
@@ -88,15 +88,33 @@ void ThreadFinalize(ThreadState *thr) {
 #if !SANITIZER_GO
   if (!ShouldReport(thr, ReportTypeThreadLeak))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
   Vector<ThreadLeak> leaks;
-  ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
-                                                      &leaks);
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
+                                                        &leaks);
+  }
+
   for (uptr i = 0; i < leaks.Size(); i++) {
-    ScopedReport rep(ReportTypeThreadLeak);
-    rep.AddThread(leaks[i].tctx, true);
-    rep.SetCount(leaks[i].count);
-    OutputReport(thr, rep);
+    // Use alloca, because malloc during signal handling deadlocks
+    ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+    // Take a new scope as Apple platforms require the below locks released
+    // before symbolizing in order to avoid a deadlock
+    {
+      ThreadRegistryLock l(&ctx->thread_registry);
+      new (rep) ScopedReport(ReportTypeThreadLeak);
+      rep->AddThread(leaks[i].tctx, true);
+      rep->SetCount(leaks[i].count);
+#  if SANITIZER_APPLE
+    }  // Close this scope to release the locks
+#  endif
+    OutputReport(thr, *rep);
+
+    // Need to manually destroy this because we used placement new to allocate
+    rep->~ScopedReport();
+#  if !SANITIZER_APPLE
+    }
+#  endif
   }
 #endif
 }
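
Note on the recurring pattern: every report site in this patch has the same shape. The ScopedReport is constructed with placement new into __builtin_alloca'd memory (heap allocation can deadlock when a report fires inside a signal handler), the raw data is collected while the registry/slot locks are held, and on Apple platforms the lock scope is closed before OutputReport symbolizes, after which the report is destroyed manually. The following is a minimal standalone sketch of that shape, not code from the patch; RegistryLock, Report, EmitReport, and ReportSomething are hypothetical stand-ins for TSan's ThreadRegistryLock, ScopedReport, and OutputReport:

    #include <cstdio>
    #include <new>  // placement new

    // Hypothetical stand-ins; only the control flow mirrors the patch.
    struct RegistryLock {
      RegistryLock() { std::puts("lock acquired"); }
      ~RegistryLock() { std::puts("lock released"); }
    };

    struct Report {
      explicit Report(int typ) : typ_(typ) {}
      ~Report() { std::puts("report destroyed"); }
      void Collect() { std::puts("collect raw stacks (under lock)"); }
      int typ_;
    };

    void EmitReport(Report &rep) {
      // Symbolization happens here; on Apple it must run with the lock dropped.
      std::printf("emit report type=%d\n", rep.typ_);
    }

    void ReportSomething(int typ) {
      // alloca, not malloc: report sites can run in signal context, where
      // heap allocation may deadlock on the allocator's own locks.
      Report *rep = static_cast<Report *>(__builtin_alloca(sizeof(Report)));
      {  // scope that owns the lock
        RegistryLock l;
        new (rep) Report(typ);  // placement new into the alloca'd buffer
        rep->Collect();
    #if SANITIZER_APPLE
      }  // Apple: close the scope (drop the lock), then symbolize and emit
    #endif
      EmitReport(*rep);
      rep->~Report();  // placement new means the destructor is invoked manually
    #if !SANITIZER_APPLE
      }  // elsewhere: keep the pre-patch behavior of emitting under the lock
    #endif
    }

    int main() { ReportSomething(42); }

Compiled plainly (SANITIZER_APPLE undefined, so it evaluates to 0) the scope closes after the emit and destroy, matching the old lock discipline; with -DSANITIZER_APPLE=1 the closing brace moves before EmitReport. This #if/#endif brace placement is how the patch keeps a single code path for both lock disciplines, at the cost of unbalanced-looking braces in the source.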