//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

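// A minimal sketch (hypothetical) of an application-side override of the
// weak hook above: provide a strong definition with the same signature in
// namespace __tsan; returning true suppresses the report.
//   namespace __tsan {
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     MyRaceLogger(rep);   // hypothetical application hook
//     return suppressed;   // keep the default suppression decision
//   }
//   }  // namespace __tsan
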
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;
  const char *last = last_frame->info.function;
#ifndef TSAN_GO
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->info.address);
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame2->next = 0;
  (void)last;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
#ifndef TSAN_GO
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
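    // (The return address can already belong to the next source line; an
    // address inside the call instruction itself symbolizes to the call
    // site, which is what the report should show.)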
    const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);
#else
    // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
    uptr pc1 = pc;
    if (si != trace.size - 1)
      pc1 -= 1;
#endif
    ReportStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}

ScopedReport::ScopedReport(ReportType typ) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                                   const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#ifndef TSAN_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
      return tctx;
    }
  }
  return 0;
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#ifndef TSAN_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

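// Classifies |addr| and attaches a location description to the report,
// trying file descriptors, heap blocks, thread stacks/TLS, and global data,
// in that order.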
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef TSAN_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset) {
  // This function restores the stack trace and mutex set for the
  // thread/epoch. It does so by getting the stack trace and mutex set at the
  // beginning of the trace part, and then replaying the trace till the given
  // epoch.
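  // Each trace event is a u64 with the event type in the top 3 bits and a pc
  // (or, for lock events, the address of the synchronization object) in the
  // low 61 bits; see the decoding in the replay loop below.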
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == 0)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(kShadowStackSize);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("  #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}

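// Returns true if this race duplicates an already-reported one (the same
// pair of stacks, or an overlapping address range) and should therefore be
// suppressed.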
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  uptr suppress_pc = 0;
  for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (suppress_pc != 0) {
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, suppress_pc != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed)
      return false;
  }
  PrintReport(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    internal__exit(flags()->exitcode);
  return true;
}

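// The IsSuppressed() checks above are driven by user-supplied suppressions;
// as a (hypothetical) usage example, a suppressions file passed via
//   TSAN_OPTIONS=suppressions=my.supp
// could contain an entry such as
//   race:KnownRacyFunction
// to silence race reports whose stacks pass through that function.
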
bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
                        StackTrace trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc) {
        if (s->supp)
          s->supp->hit_count++;
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx,
                               const ScopedReport &srep,
                               uptr addr) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc) {
      if (s->supp)
        s->supp->hit_count++;
      return true;
    }
  }
  return false;
}

bool FrameIsInternal(const ReportStack *frame) {
  if (frame == 0)
    return false;
  const char *file = frame->info.file;
  return file != 0 &&
         (internal_strstr(file, "tsan_interceptors.cc") ||
          internal_strstr(file, "sanitizer_common_interceptors.inc") ||
          internal_strstr(file, "tsan_interface_"));
}

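// Used when report_atomic_races is off: returns true for plain/plain races
// and for races where an atomic access touches freed memory, i.e. the only
// atomic-involved races that are still worth reporting.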
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // The symbolizer makes lots of intercepted calls. If we try to process
  // them, at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  if (IsFiredSuppression(ctx, rep, addr))
    return;
  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  ObtainCurrentStack(thr, toppc, &traces[0]);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  if (IsFiredSuppression(ctx, rep, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

void PrintCurrentStackSlow(uptr pc) {
#ifndef TSAN_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
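  // Unwind() fills the buffer innermost frame first, but SymbolizeStack()
  // above prepends each frame and so reverses its input; pre-reversing here
  // makes the final report list innermost-first again.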
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"

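// Example (user code): the exported entry point above can be called directly
// from an instrumented program to dump the current stack, e.g.
//   extern "C" void __sanitizer_print_stack_trace();
//   void DebugHere() { __sanitizer_print_stack_trace(); }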