//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(const StackTrace& trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  ScopedInRtl in_rtl;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow();
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
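
// An application or test can supply its own strong definition of OnReport to
// intercept reports. A minimal illustrative sketch (not part of the runtime):
// returning true makes OutputReport() below drop the report without printing.
//
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     (void)rep;
//     return true;  // suppress every report
//   }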

static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  if (last && 0 == internal_strcmp(last, "schedunlock"))
    last_frame2->next = 0;
#endif
}

static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    // We obtain the return address, that is, the address of the next
    // instruction, so offset it by 1 byte.
    bool is_last = (si == trace.Size() - 1);
    ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last);
    CHECK_NE(ent, 0);
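    // SymbolizeCode may return a chain of frames (e.g. for inlined functions);
    // apply the same return-address adjustment to each frame in the chain and
    // splice the chain onto the front of the resulting stack.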
    ReportStack *last = ent;
    while (last->next) {
      last->pc += !is_last;
      last = last->next;
    }
    last->pc += !is_last;
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}

ScopedReport::ScopedReport(ReportType typ) {
  ctx_ = CTX();
  ctx_->thread_mtx.CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx_->report_mtx.Lock();
}

ScopedReport::~ScopedReport() {
  ctx_->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(const StackTrace *stack) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(*stack);
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
    const StackTrace *stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(*stack);
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 uid = 0;
    uptr addr = SyncVar::SplitId(d.id, &uid);
    SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
    // Check that the mutex is still alive.
    // Another mutex can be created at the same address,
    // so check uid as well.
    if (s && s->CheckId(uid)) {
      ReportMopMutex mtx = {s->uid, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(s);
    } else {
      ReportMopMutex mtx = {d.id, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(d.id);
    }
    if (s)
      s->mtx.ReadUnlock();
  }
}

void ScopedReport::AddThread(const ThreadContext *tctx) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if (rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
  rt->parent_tid = tctx->creation_tid;
  rt->stack = SymbolizeStack(tctx->creation_stack);
}

#ifndef TSAN_GO
static ThreadContext *FindThread(int unique_id) {
  Context *ctx = CTX();
  ctx->thread_mtx.CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx && tctx->unique_id == unique_id) {
      return tctx;
    }
  }
  return 0;
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  Context *ctx = CTX();
  ctx->thread_mtx.CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx == 0 || tctx->status != ThreadStatusRunning)
      continue;
    ThreadState *thr = tctx->thr;
    CHECK(thr);
    if (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) {
      *is_stack = true;
      return tctx;
    }
    if (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size) {
      *is_stack = false;
      return tctx;
    }
  }
  return 0;
}
#endif

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->destroyed = false;
  rm->stack = SymbolizeStack(s->creation_stack);
}

void ScopedReport::AddMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->destroyed = true;
  rm->stack = 0;
}

void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(creat_stack, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    ThreadContext *tctx = FindThread(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  if (allocator()->PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(0, (void*)addr);
    ThreadContext *tctx = FindThread(b->alloc_tid);
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->size;
    loc->tid = tctx ? tctx->tid : b->alloc_tid;
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    AddThread(tctx);
  }
  ReportLocation *loc = SymbolizeData(addr);
  if (loc) {
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef TSAN_GO
void ScopedReport::AddSleep(u32 stack_id) {
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rep_->sleep = SymbolizeStack(trace);
  }
}
#endif

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
  // This function restores the stack trace and mutex set for the thread/epoch.
  // It does so by getting the stack trace and mutex set at the beginning of
  // the trace part, and then replaying the trace till the given epoch.
  ThreadContext *tctx = CTX()->threads[tid];
  if (tctx == 0)
    return;
  Trace* trace = 0;
  if (tctx->status == ThreadStatusRunning) {
    CHECK(tctx->thr);
    trace = &tctx->thr->trace;
  } else if (tctx->status == ThreadStatusFinished
      || tctx->status == ThreadStatusDead) {
    if (tctx->dead_info == 0)
      return;
    trace = &tctx->dead_info->trace;
  } else {
    return;
  }
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(1024);  // FIXME: de-hardcode 1024
  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
    stack[i] = hdr->stack0.Get(i);
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.Size();
  Event *events = (Event*)GetThreadTrace(tid);
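  // Each trace event is a 64-bit word: the top 3 bits encode the EventType,
  // the low 61 bits carry the PC (or, for lock/unlock events, the mutex id).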
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}
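
// Returns true if an equivalent report has already been produced: either the
// pair of stacks hashes to a previously seen pair, or the racy address range
// overlaps a previously reported one (subject to the suppress_equal_stacks and
// suppress_equal_addresses flags). If only one of the two matches, the other
// is recorded so that future duplicates are suppressed as well.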

static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1,
                  const ReportStack *suppress_stack2) {
  const ReportDesc *rep = srep.GetReport();
  uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1);
  if (suppress_pc == 0)
    suppress_pc = IsSuppressed(rep->typ, suppress_stack2);
  if (suppress_pc != 0) {
    FiredSuppression supp = {srep.GetReport()->typ, suppress_pc};
    ctx->fired_suppressions.PushBack(supp);
  }
  if (OnReport(rep, suppress_pc != 0))
    return false;
  PrintReport(rep);
  CTX()->nreported++;
  return true;
}

bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.Size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.Size(); j++) {
      if (trace.Get(j) == ctx->fired_suppressions[k].pc)
        return true;
    }
  }
  return false;
}

bool FrameIsInternal(const ReportStack *frame) {
  return frame != 0 && frame->file != 0
      && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
          internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
          internal_strstr(frame->file, "tsan_interface_"));
}

// On programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
static bool IsJavaNonsense(const ReportDesc *rep) {
#ifndef TSAN_GO
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    if (frame == 0
        || (frame->func == 0 && frame->file == 0 && frame->line == 0
            && frame->module == 0)) {
      return true;
    }
    if (FrameIsInternal(frame)) {
      frame = frame->next;
      if (frame == 0
          || (frame->func == 0 && frame->file == 0 && frame->line == 0
              && frame->module == 0)) {
        if (frame) {
          FiredSuppression supp = {rep->typ, frame->pc};
          CTX()->fired_suppressions.PushBack(supp);
        }
        return true;
      }
    }
  }
#endif
  return false;
}

static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}
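
// Builds and emits a race report: computes the racy address range, restores
// the stack trace and mutex set of the previous access from the thread trace,
// filters duplicates and matched suppressions, and finally outputs the report
// while holding ctx->thread_mtx.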

void ReportRace(ThreadState *thr) {
  if (!flags()->report_bugs)
    return;
  ScopedInRtl in_rtl;

  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  if (thr->in_signal_handler)
    Printf("ThreadSanitizer: printing report from signal handler."
           " Can crash or hang.\n");

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  Context *ctx = CTX();
  Lock l0(&ctx->thread_mtx);

  ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
  const uptr kMop = 2;
  StackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
    return;

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = ctx->threads[s.tid()];
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
                    rep.GetReport()->mops[1]->stack))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  PrintStack(SymbolizeStack(trace));
}

void PrintCurrentStackSlow() {
#ifndef TSAN_GO
  __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
      sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
  ptrace->SlowUnwindStack(__sanitizer::StackTrace::GetCurrentPc(),
      kStackTraceMax);
  StackTrace trace;
  trace.Init(ptrace->trace, ptrace->size);
  PrintStack(SymbolizeStack(trace));
#endif
}

}  // namespace __tsan