diff --git a/compiler-rt/lib/tsan/go/tsan_go.cpp b/compiler-rt/lib/tsan/go/tsan_go.cpp index c689a51fb5e1d..7709b86996116 100644 --- a/compiler-rt/lib/tsan/go/tsan_go.cpp +++ b/compiler-rt/lib/tsan/go/tsan_go.cpp @@ -118,6 +118,26 @@ ReportLocation *SymbolizeData(uptr addr) { } } +bool SymbolizeData(uptr addr, DataInfo *info) { + SymbolizeDataContext cbctx; + internal_memset(&cbctx, 0, sizeof(cbctx)); + cbctx.addr = addr; + go_runtime_cb(CallbackSymbolizeData, &cbctx); + if (!cbctx.res) + return false; + if (cbctx.heap) { + return false; + } else { + info->Clear(); + info->name = internal_strdup(cbctx.name ? cbctx.name : "??"); + info->file = internal_strdup(cbctx.file ? cbctx.file : "??"); + info->line = cbctx.line; + info->start = cbctx.start; + info->size = cbctx.size; + return true; + } +} + static ThreadState *main_thr; static bool inited; diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp index 9cab2a3727128..5bccd31e48bf5 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp @@ -2064,13 +2064,17 @@ static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) { // StackTrace::GetNestInstructionPc(pc) is used because return address is // expected, OutputReport() will undo this. 
ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack); - ThreadRegistryLock l(&ctx->thread_registry); + if (IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) { + return; + } ScopedReport rep(ReportTypeErrnoInSignal); - rep.SetSigNum(sig); - if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) { + { + // ThreadRegistryLock l(&ctx->thread_registry); + rep.SetSigNum(sig); rep.AddStack(stack, true); - OutputReport(thr, rep); } + rep.SymbolizeReport(); + OutputReport(thr, rep); } static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire, diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp index befd6a369026d..e359ac7ee29cc 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp @@ -437,15 +437,18 @@ void __tsan_mutex_post_divert(void *addr, unsigned flagz) { } static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) { - ThreadRegistryLock l(&ctx->thread_registry); ScopedReport rep(ReportTypeMutexHeldWrongContext); - for (uptr i = 0; i < thr->mset.Size(); ++i) { - MutexSet::Desc desc = thr->mset.Get(i); - rep.AddMutex(desc.addr, desc.stack_id); + { + ThreadRegistryLock l(&ctx->thread_registry); + for (uptr i = 0; i < thr->mset.Size(); ++i) { + MutexSet::Desc desc = thr->mset.Get(i); + rep.AddMutex(desc.addr, desc.stack_id); + } + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep.AddStack(trace, true); } - VarSizeStackTrace trace; - ObtainCurrentStack(thr, pc, &trace); - rep.AddStack(trace, true); + rep.SymbolizeReport(); OutputReport(thr, rep); } diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp index 0705365d77427..3d4d216df0089 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp @@ -182,9 +182,12 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) { ObtainCurrentStack(thr, pc, 
&stack); if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack)) return; - ThreadRegistryLock l(&ctx->thread_registry); ScopedReport rep(ReportTypeSignalUnsafe); - rep.AddStack(stack, true); + { + // ThreadRegistryLock l(&ctx->thread_registry); + rep.AddStack(stack, true); + } + rep.SymbolizeReport(); OutputReport(thr, rep); } diff --git a/compiler-rt/lib/tsan/rtl/tsan_report.h b/compiler-rt/lib/tsan/rtl/tsan_report.h index bfe470797f8f7..d1568c1434d0a 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_report.h +++ b/compiler-rt/lib/tsan/rtl/tsan_report.h @@ -16,6 +16,7 @@ #include "sanitizer_common/sanitizer_thread_registry.h" #include "sanitizer_common/sanitizer_vector.h" #include "tsan_defs.h" +#include "tsan_stack_trace.h" namespace __tsan { @@ -39,6 +40,7 @@ enum ReportType { }; struct ReportStack { + VarSizeStackTrace raw; SymbolizedStack *frames = nullptr; bool suppressable = false; }; diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h index f48be8e0a4fe0..2dfad6b01d758 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h @@ -406,6 +406,7 @@ uptr TagFromShadowStackFrame(uptr pc); class ScopedReportBase { public: + void SetKindAndTag(ReportType typ, uptr tag); void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid, StackTrace stack, const MutexSet *mset); void AddStack(StackTrace stack, bool suppressable = false); @@ -418,17 +419,16 @@ class ScopedReportBase { void SetCount(int count); void SetSigNum(int sig); + void SymbolizeReport(); const ReportDesc *GetReport() const; protected: + ScopedReportBase(); ScopedReportBase(ReportType typ, uptr tag); ~ScopedReportBase(); private: ReportDesc *rep_; - // Symbolizer makes lots of intercepted calls. If we try to process them, - // at best it will cause deadlocks on internal mutexes. 
- ScopedIgnoreInterceptors ignore_interceptors_; ScopedReportBase(const ScopedReportBase &) = delete; void operator=(const ScopedReportBase &) = delete; @@ -436,11 +436,9 @@ class ScopedReportBase { class ScopedReport : public ScopedReportBase { public: + explicit ScopedReport(); explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone); ~ScopedReport(); - - private: - ScopedErrorReportLock lock_; }; bool ShouldReport(ThreadState *thr, ReportType typ); diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp index 2a8aa1915c9ae..9bfe9eb21b90f 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp @@ -55,13 +55,16 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ, return; if (!ShouldReport(thr, typ)) return; - ThreadRegistryLock l(&ctx->thread_registry); ScopedReport rep(typ); - rep.AddMutex(addr, creation_stack_id); - VarSizeStackTrace trace; - ObtainCurrentStack(thr, pc, &trace); - rep.AddStack(trace, true); - rep.AddLocation(addr, 1); + { + ThreadRegistryLock l(&ctx->thread_registry); + rep.AddMutex(addr, creation_stack_id); + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep.AddStack(trace, true); + rep.AddLocation(addr, 1); + } + rep.SymbolizeReport(); OutputReport(thr, rep); } @@ -528,50 +531,56 @@ void AfterSleep(ThreadState *thr, uptr pc) { void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) { if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock)) return; - ThreadRegistryLock l(&ctx->thread_registry); ScopedReport rep(ReportTypeDeadlock); - for (int i = 0; i < r->n; i++) { - rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]); - rep.AddUniqueTid((int)r->loop[i].thr_ctx); - rep.AddThread((int)r->loop[i].thr_ctx); - } - uptr dummy_pc = 0x42; - for (int i = 0; i < r->n; i++) { - for (int j = 0; j < (flags()->second_deadlock_stack ? 
2 : 1); j++) { - u32 stk = r->loop[i].stk[j]; - if (stk && stk != kInvalidStackID) { - rep.AddStack(StackDepotGet(stk), true); - } else { - // Sometimes we fail to extract the stack trace (FIXME: investigate), - // but we should still produce some stack trace in the report. - rep.AddStack(StackTrace(&dummy_pc, 1), true); + { + ThreadRegistryLock l(&ctx->thread_registry); + for (int i = 0; i < r->n; i++) { + rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]); + rep.AddUniqueTid((int)r->loop[i].thr_ctx); + rep.AddThread((int)r->loop[i].thr_ctx); + } + uptr dummy_pc = 0x42; + for (int i = 0; i < r->n; i++) { + for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) { + u32 stk = r->loop[i].stk[j]; + if (stk && stk != kInvalidStackID) { + rep.AddStack(StackDepotGet(stk), true); + } else { + // Sometimes we fail to extract the stack trace (FIXME: investigate), + // but we should still produce some stack trace in the report. + rep.AddStack(StackTrace(&dummy_pc, 1), true); + } } } } + rep.SymbolizeReport(); OutputReport(thr, rep); } void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr, FastState last_lock, StackID creation_stack_id) { - // We need to lock the slot during RestoreStack because it protects - // the slot journal. - Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx); - ThreadRegistryLock l0(&ctx->thread_registry); - Lock slots_lock(&ctx->slot_mtx); ScopedReport rep(ReportTypeMutexDestroyLocked); - rep.AddMutex(addr, creation_stack_id); - VarSizeStackTrace trace; - ObtainCurrentStack(thr, pc, &trace); - rep.AddStack(trace, true); - - Tid tid; - DynamicMutexSet mset; - uptr tag; - if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr, - 0, kAccessWrite, &tid, &trace, mset, &tag)) - return; - rep.AddStack(trace, true); - rep.AddLocation(addr, 1); + { + // We need to lock the slot during RestoreStack because it protects + // the slot journal. 
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx); + ThreadRegistryLock l0(&ctx->thread_registry); + Lock slots_lock(&ctx->slot_mtx); + rep.AddMutex(addr, creation_stack_id); + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep.AddStack(trace, true); + + Tid tid; + DynamicMutexSet mset; + uptr tag; + if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), + addr, 0, kAccessWrite, &tid, &trace, mset, &tag)) + return; + rep.AddStack(trace, true); + rep.AddLocation(addr, 1); + } + rep.SymbolizeReport(); OutputReport(thr, rep); } diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp index 0311df553fdd0..84034aa6ed379 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp @@ -29,7 +29,7 @@ namespace __tsan { using namespace __sanitizer; -static ReportStack *SymbolizeStack(StackTrace trace); +static ReportStack *SymbolizeStack(const StackTrace &trace); // Can be overriden by an application/test to intercept reports. 
#ifdef TSAN_EXTERNAL_HOOKS @@ -98,9 +98,10 @@ ReportStack *SymbolizeStackId(u32 stack_id) { return SymbolizeStack(stack); } -static ReportStack *SymbolizeStack(StackTrace trace) { +static SymbolizedStack *SymbolizeRawStack(const StackTrace &trace) { if (trace.size == 0) return 0; + CHECK_NE(trace.trace, nullptr); SymbolizedStack *top = nullptr; for (uptr si = 0; si < trace.size; si++) { const uptr pc = trace.trace[si]; @@ -121,12 +122,31 @@ static ReportStack *SymbolizeStack(StackTrace trace) { top = ent; } StackStripMain(top); + return top; +} + +static ReportStack *SymbolizeStack(const StackTrace &trace) { + if (trace.size == 0) + return 0; + CHECK_NE(trace.trace, nullptr); + auto *stack = New<ReportStack>(); + stack->frames = SymbolizeRawStack(trace); + return stack; +} +static ReportStack *CreateReportStack(const StackTrace &trace) { + if (trace.size == 0) + return 0; + CHECK_NE(trace.trace, nullptr); auto *stack = New<ReportStack>(); - stack->frames = top; + stack->raw.Init(trace); return stack; } +static ReportStack *CreateReportStack(const StackTrace &&trace) { + return CreateReportStack(trace); +} + bool ShouldReport(ThreadState *thr, ReportType typ) { // We set thr->suppress_reports in the fork context. // Taking any locking in the fork context can lead to deadlocks. 
@@ -155,22 +175,24 @@ bool ShouldReport(ThreadState *thr, ReportType typ) { } } -ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) { - ctx->thread_registry.CheckLocked(); - rep_ = New<ReportDesc>(); +ScopedReportBase::ScopedReportBase() { rep_ = New<ReportDesc>(); } + +ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) + : ScopedReportBase() { rep_->typ = typ; rep_->tag = tag; - ctx->report_mtx.Lock(); } -ScopedReportBase::~ScopedReportBase() { - ctx->report_mtx.Unlock(); - DestroyAndFree(rep_); +ScopedReportBase::~ScopedReportBase() { DestroyAndFree(rep_); } + +void ScopedReportBase::SetKindAndTag(ReportType typ, uptr tag) { + rep_->typ = typ; + rep_->tag = tag; } void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) { ReportStack **rs = rep_->stacks.PushBack(); - *rs = SymbolizeStack(stack); + *rs = CreateReportStack(stack); (*rs)->suppressable = suppressable; } @@ -187,8 +209,8 @@ void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, mop->size = size; mop->write = !(typ & kAccessRead); mop->atomic = typ & kAccessAtomic; - mop->stack = SymbolizeStack(stack); mop->external_tag = external_tag; + mop->stack = CreateReportStack(stack); if (mop->stack) mop->stack->suppressable = true; for (uptr i = 0; i < mset->Size(); i++) { @@ -216,8 +238,7 @@ void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) { rt->name = internal_strdup(tctx->name); rt->parent_tid = tctx->parent_tid; rt->thread_type = tctx->thread_type; - rt->stack = 0; - rt->stack = SymbolizeStackId(tctx->creation_stack_id); + rt->stack = CreateReportStack(StackDepotGet(tctx->creation_stack_id)); if (rt->stack) rt->stack->suppressable = suppressable; } @@ -270,7 +291,7 @@ int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) { rep_->mutexes.PushBack(rm); rm->id = rep_->mutexes.Size() - 1; rm->addr = addr; - rm->stack = SymbolizeStackId(creation_stack_id); + rm->stack = CreateReportStack(StackDepotGet(creation_stack_id)); return 
rm->id; } @@ -288,7 +309,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) { loc->fd_closed = closed; loc->fd = fd; loc->tid = creat_tid; - loc->stack = SymbolizeStackId(creat_stack); + loc->stack = CreateReportStack(StackDepotGet(creat_stack)); rep_->locs.PushBack(loc); AddThread(creat_tid); return; @@ -310,7 +331,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) { loc->heap_chunk_size = b->siz; loc->external_tag = b->tag; loc->tid = b->tid; - loc->stack = SymbolizeStackId(b->stk); + loc->stack = CreateReportStack(StackDepotGet(b->stk)); rep_->locs.PushBack(loc); AddThread(b->tid); return; @@ -324,16 +345,16 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) { AddThread(tctx); } #endif - if (ReportLocation *loc = SymbolizeData(addr)) { - loc->suppressable = true; - rep_->locs.PushBack(loc); - return; - } + auto *loc = New<ReportLocation>(); + loc->type = ReportLocationGlobal; + loc->global.start = addr; + loc->suppressable = true; + rep_->locs.PushBack(loc); } #if !SANITIZER_GO void ScopedReportBase::AddSleep(StackID stack_id) { - rep_->sleep = SymbolizeStackId(stack_id); + rep_->sleep = CreateReportStack(StackDepotGet(stack_id)); } #endif @@ -341,8 +362,56 @@ void ScopedReportBase::SetCount(int count) { rep_->count = count; } void ScopedReportBase::SetSigNum(int sig) { rep_->signum = sig; } +void ScopedReportBase::SymbolizeReport() { + CheckedMutex::CheckNoLocks(); + // Symbolizer makes lots of intercepted calls. If we try to process them, + // at best it will cause deadlocks on internal mutexes. 
+ ScopedIgnoreInterceptors ignore_interceptors_; + for (uptr i = 0; i < rep_->stacks.Size(); i++) { + rep_->stacks[i]->frames = SymbolizeRawStack(rep_->stacks[i]->raw); + rep_->stacks[i]->raw.Reset(); + } + for (uptr i = 0; i < rep_->mops.Size(); i++) { + if (rep_->mops[i]->stack) { + rep_->mops[i]->stack->frames = + SymbolizeRawStack(rep_->mops[i]->stack->raw); + rep_->mops[i]->stack->raw.Reset(); + } + } + for (uptr i = 0; i < rep_->locs.Size(); i++) { + if (rep_->locs[i]->stack) { + rep_->locs[i]->stack->frames = + SymbolizeRawStack(rep_->locs[i]->stack->raw); + rep_->locs[i]->stack->raw.Reset(); + } + if (rep_->locs[i]->type == ReportLocationGlobal) { + SymbolizeData(rep_->locs[i]->global.start, &rep_->locs[i]->global); + } + } + for (uptr i = 0; i < rep_->mutexes.Size(); i++) { + if (rep_->mutexes[i]->stack) { + rep_->mutexes[i]->stack->frames = + SymbolizeRawStack(rep_->mutexes[i]->stack->raw); + rep_->mutexes[i]->stack->raw.Reset(); + } + } + for (uptr i = 0; i < rep_->threads.Size(); i++) { + if (rep_->threads[i]->stack) { + rep_->threads[i]->stack->frames = + SymbolizeRawStack(rep_->threads[i]->stack->raw); + rep_->threads[i]->stack->raw.Reset(); + } + } + if (rep_->sleep) { + rep_->sleep->frames = SymbolizeRawStack(rep_->sleep->raw); + rep_->sleep->raw.Reset(); + } +} + const ReportDesc *ScopedReportBase::GetReport() const { return rep_; } +ScopedReport::ScopedReport() : ScopedReportBase() {} + ScopedReport::ScopedReport(ReportType typ, uptr tag) : ScopedReportBase(typ, tag) {} @@ -629,10 +698,12 @@ static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) { } bool OutputReport(ThreadState *thr, const ScopedReport &srep) { + CheckedMutex::CheckNoLocks(); // These should have been checked in ShouldReport. // It's too late to check them here, we have already taken locks. 
CHECK(flags()->report_bugs); CHECK(!thr->suppress_reports); + Lock l(&ctx->report_mtx); atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime()); const ReportDesc *rep = srep.GetReport(); CHECK_EQ(thr->current_report, nullptr); @@ -705,18 +776,12 @@ static bool SpuriousRace(Shadow old) { return last.sid() == old.sid() && last.epoch() == old.epoch(); } -void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, - AccessType typ0) { +static bool BuildRaceReport(ScopedReport &rep, ThreadState *thr, + RawShadow *shadow_mem, Shadow &cur, Shadow &old, + AccessType typ0) { CheckedMutex::CheckNoLocks(); - - // Symbolizer makes lots of intercepted calls. If we try to process them, - // at best it will cause deadlocks on internal mutexes. - ScopedIgnoreInterceptors ignore; - uptr addr = ShadowToMem(shadow_mem); DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr); - if (!ShouldReport(thr, ReportTypeRace)) - return; uptr addr_off0, size0; cur.GetAccess(&addr_off0, &size0, nullptr); uptr addr_off1, size1, typ1; @@ -724,9 +789,9 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, if (!flags()->report_atomic_races && ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) && !(typ0 & kAccessFree) && !(typ1 & kAccessFree)) - return; + return false; if (SpuriousRace(old)) - return; + return false; const uptr kMop = 2; Shadow s[kMop] = {cur, old}; @@ -737,7 +802,7 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, uptr addr_min = min(addr0, addr1); uptr addr_max = max(end0, end1); if (IsExpectedReport(addr_min, addr_max - addr_min)) - return; + return false; ReportType rep_typ = ReportTypeRace; if ((typ0 & kAccessVptr) && (typ1 & kAccessFree)) @@ -748,7 +813,7 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, rep_typ = ReportTypeUseAfterFree; if (IsFiredSuppression(ctx, rep_typ, addr)) - return; + return false; VarSizeStackTrace traces[kMop]; Tid tids[kMop] = 
{thr->tid, kInvalidTid}; @@ -756,7 +821,7 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]); if (IsFiredSuppression(ctx, rep_typ, traces[0])) - return; + return false; DynamicMutexSet mset1; MutexSet *mset[kMop] = {&thr->mset, mset1}; @@ -767,18 +832,18 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, ThreadRegistryLock l0(&ctx->thread_registry); Lock slots_lock(&ctx->slot_mtx); if (SpuriousRace(old)) - return; + return false; if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1, size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) { StoreShadow(&ctx->last_spurious_race, old.raw()); - return; + return false; } if (IsFiredSuppression(ctx, rep_typ, traces[1])) - return; + return false; if (HandleRacyStacks(thr, traces)) - return; + return false; // If any of the accesses has a tag, treat this as an "external" race. uptr tag = kExternalTagNone; @@ -789,8 +854,7 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, break; } } - - ScopedReport rep(rep_typ, tag); + rep.SetKindAndTag(rep_typ, tag); for (uptr i = 0; i < kMop; i++) rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]); @@ -819,6 +883,18 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid())) rep.AddSleep(thr->last_sleep_stack_id); #endif + return true; +} + +void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, + AccessType typ0) { + if (!ShouldReport(thr, ReportTypeRace)) + return; + ScopedReport rep; + if (!BuildRaceReport(rep, thr, shadow_mem, cur, old, typ0)) { + return; + } + rep.SymbolizeReport(); OutputReport(thr, rep); } diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp index 5316a7862e449..3acaf824da38a 100644 --- 
a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp @@ -34,6 +34,7 @@ void ThreadContext::OnReset() { CHECK(!sync); } struct ThreadLeak { ThreadContext *tctx; int count; + ScopedReport rep; }; static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) { @@ -47,7 +48,7 @@ static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) { return; } } - leaks.PushBack({tctx, 1}); + leaks.PushBack({tctx, 1, ScopedReport{ReportTypeThreadLeak}}); } #endif @@ -88,15 +89,19 @@ void ThreadFinalize(ThreadState *thr) { #if !SANITIZER_GO if (!ShouldReport(thr, ReportTypeThreadLeak)) return; - ThreadRegistryLock l(&ctx->thread_registry); Vector<ThreadLeak> leaks; - ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks, - &leaks); + { + ThreadRegistryLock l(&ctx->thread_registry); + ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks, + &leaks); + for (uptr i = 0; i < leaks.Size(); i++) { + leaks[i].rep.AddThread(leaks[i].tctx, true); + leaks[i].rep.SetCount(leaks[i].count); + } + } for (uptr i = 0; i < leaks.Size(); i++) { - ScopedReport rep(ReportTypeThreadLeak); - rep.AddThread(leaks[i].tctx, true); - rep.SetCount(leaks[i].count); - OutputReport(thr, rep); + leaks[i].rep.SymbolizeReport(); + OutputReport(thr, leaks[i].rep); } #endif } diff --git a/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp b/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp index 9bbaafb3a85f5..a4976d656a3aa 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp @@ -38,11 +38,24 @@ void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) { trace_buffer[cnt] = extra_top_pc; } +void VarSizeStackTrace::Init(const StackTrace &trace) { + ResizeBuffer(trace.size); + internal_memcpy(trace_buffer, trace.trace, + trace.size * sizeof(trace.trace[0])); +} + void VarSizeStackTrace::ReverseOrder() { for (u32 i = 0; i < (size >> 1); i++) Swap(trace_buffer[i], 
trace_buffer[size - 1 - i]); } +void VarSizeStackTrace::Reset() { + size = 0; + trace = nullptr; + Free(trace_buffer); + trace_buffer = nullptr; +} + } // namespace __tsan #if !SANITIZER_GO diff --git a/compiler-rt/lib/tsan/rtl/tsan_stack_trace.h b/compiler-rt/lib/tsan/rtl/tsan_stack_trace.h index 3eb8ce156e835..deda28b5ac856 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_stack_trace.h +++ b/compiler-rt/lib/tsan/rtl/tsan_stack_trace.h @@ -24,12 +24,17 @@ struct VarSizeStackTrace : public StackTrace { VarSizeStackTrace(); ~VarSizeStackTrace(); + void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0); + void Init(const StackTrace &trace); // Reverses the current stack trace order, the top frame goes to the bottom, // the last frame goes to the top. void ReverseOrder(); + // Clear and release internal buffer. + void Reset(); + private: void ResizeBuffer(uptr new_size); diff --git a/compiler-rt/lib/tsan/rtl/tsan_symbolize.cpp b/compiler-rt/lib/tsan/rtl/tsan_symbolize.cpp index 2e2744d2eae78..444698063e200 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_symbolize.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_symbolize.cpp @@ -116,6 +116,10 @@ ReportLocation *SymbolizeData(uptr addr) { return ent; } +bool SymbolizeData(uptr addr, DataInfo *info) { + return Symbolizer::GetOrInit()->SymbolizeData(addr, info); +} + void SymbolizeFlush() { Symbolizer::GetOrInit()->Flush(); } diff --git a/compiler-rt/lib/tsan/rtl/tsan_symbolize.h b/compiler-rt/lib/tsan/rtl/tsan_symbolize.h index 7adaa04dc273e..499a60de779ac 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_symbolize.h +++ b/compiler-rt/lib/tsan/rtl/tsan_symbolize.h @@ -21,6 +21,7 @@ void EnterSymbolizer(); void ExitSymbolizer(); SymbolizedStack *SymbolizeCode(uptr addr); ReportLocation *SymbolizeData(uptr addr); +bool SymbolizeData(uptr addr, DataInfo *info); void SymbolizeFlush(); ReportStack *NewReportStackEntry(uptr addr);