//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"
namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(const StackTrace& trace);
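// Handler for CHECK() failures inside the TSan runtime itself: prints the
// failing location and the current stack, then dies.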
void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow();
  Die();
}
// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
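// Cleans up a symbolized stack for reporting: strips the "__interceptor_"
// prefix and the configured path prefix from frames, and cuts everything
// below main()/the internal thread start routine/global ctors init.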
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame2->next = 0;
  (void)last;
#endif
}
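// Fetches a previously recorded stack trace from the stack depot by id
// and symbolizes it.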
ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(stack_id, &ssz);
  if (stack == 0)
    return 0;
  StackTrace trace;
  trace.Init(stack, ssz);
  return SymbolizeStack(trace);
}
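// Symbolizes every pc in the trace (one pc may expand into several frames
// because of inlining), restores the original pc values in the resulting
// frames, and strips runtime frames via StackStripMain().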
static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    const uptr pc = trace.Get(si);
#ifndef TSAN_GO
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
    const uptr pc1 = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
#else
    // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
    uptr pc1 = pc;
    if (si != trace.Size() - 1)
      pc1 -= 1;
#endif
    ReportStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
    while (last->next) {
      last->pc = pc;  // restore original pc for report
      last = last->next;
    }
    last->pc = pc;  // restore original pc for report
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}
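// ScopedReport owns the report being constructed. The constructor allocates
// the ReportDesc and takes the report mutexes; the destructor releases them
// and frees the report.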
ScopedReport::ScopedReport(ReportType typ) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}
void ScopedReport::AddStack(const StackTrace *stack) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(*stack);
}
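// Records one of the racing memory accesses: its address, size, access kind
// (read/write, atomic or not), symbolized stack, and the set of mutexes held
// at the time of the access.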
void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
    const StackTrace *stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(*stack);
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}
void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
}
#ifndef TSAN_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
      return tctx;
    }
  }
  return 0;
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}
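// Callback for FindThreadContextLocked(): returns true if addr falls into
// the stack or TLS range of a running thread.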
static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
      (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif
void ScopedReport::AddThread(int unique_tid) {
#ifndef TSAN_GO
  AddThread(FindThreadByUidLocked(unique_tid));
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}
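// Adds the mutex identified by id, which packs the mutex address together
// with its unique id. If the mutex is still alive (and the uid matches), it
// is added as a live mutex; otherwise it is recorded as a destroyed one.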
u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->synctab.GetIfExistsAndLock(addr, false);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.ReadUnlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}
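// Describes the racy address for the report: a file descriptor, a heap
// block, a thread stack/TLS range, or a global, whichever matches first.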
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  if (allocator()->PointerIsMine((void*)addr)
      && (b = user_mblock(0, (void*)addr))) {
    ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->Size();
    loc->tid = tctx ? tctx->tid : b->Tid();
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    loc->stack = SymbolizeStackId(b->StackId());
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    AddThread(tctx);
  }
  ReportLocation *loc = SymbolizeData(addr);
  if (loc) {
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}
#ifndef TSAN_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}
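// Each trace event is a 64-bit value: the top 3 bits encode the EventType
// and the low 61 bits hold the pc (see the decoding in the replay loop
// below).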
void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
  // This function restores the stack trace and mutex set for the thread/epoch.
  // It does so by getting the stack trace and mutex set at the beginning of
  // the trace part, and then replaying the trace till the given epoch.
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == 0)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(kShadowStackSize);
  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
    stack[i] = hdr->stack0.Get(i);
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.Size();
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}
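// Suppresses duplicate race reports: a report is considered a duplicate if
// the md5 hashes of both stacks match a previously reported pair, or if the
// racy address range overlaps a previously reported range.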
static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}
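// Final reporting step: checks suppressions against both stacks and the
// location, records a fired suppression if any, lets OnReport() veto the
// report, then prints it and optionally halts the process.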
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1,
                  const ReportStack *suppress_stack2,
                  const ReportLocation *suppress_loc) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp);
  if (suppress_pc == 0)
    suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp);
  if (suppress_pc == 0)
    suppress_pc = IsSuppressed(rep->typ, suppress_loc, &supp);
  if (suppress_pc != 0) {
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.push_back(s);
  }
  if (OnReport(rep, suppress_pc != 0))
    return false;
  PrintReport(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    internal__exit(flags()->exitcode);
  return true;
}
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.Size(); j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.Get(j) == s->pc) {
        if (s->supp)
          s->supp->hit_count++;
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx,
                               const ScopedReport &srep,
                               uptr addr) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc) {
      if (s->supp)
        s->supp->hit_count++;
      return true;
    }
  }
  return false;
}

bool FrameIsInternal(const ReportStack *frame) {
  return frame != 0 && frame->file != 0
      && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
          internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
          internal_strstr(frame->file, "tsan_interface_"));
}
// On programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
static bool IsJavaNonsense(const ReportDesc *rep) {
#ifndef TSAN_GO
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    if (frame == 0
        || (frame->func == 0 && frame->file == 0 && frame->line == 0
          && frame->module == 0)) {
      return true;
    }
    if (FrameIsInternal(frame)) {
      frame = frame->next;
      if (frame == 0
          || (frame->func == 0 && frame->file == 0 && frame->line == 0
            && frame->module == 0)) {
        if (frame) {
          FiredSuppression supp = {rep->typ, frame->pc, 0};
          ctx->fired_suppressions.push_back(supp);
        }
        return true;
      }
    }
  }
#endif
  return false;
}
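// Decides whether the race must be reported even when atomic races are
// disabled: either both accesses are plain, or an atomic access races with
// a free.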
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}
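// Top-level entry point for reporting a data race detected on the current
// access: restores the second stack from the trace, applies suppressions and
// duplicate filtering, and assembles the full report (accesses, threads,
// mutexes, location).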
void ReportRace(ThreadState *thr) {
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  if (IsFiredSuppression(ctx, rep, addr))
    return;
  const uptr kMop = 2;
  StackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  if (IsFiredSuppression(ctx, rep, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
    return;

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  ReportLocation *suppress_loc = rep.GetReport()->locs.Size() ?
      rep.GetReport()->locs[0] : 0;
  if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
                    rep.GetReport()->mops[1]->stack,
                    suppress_loc))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}
void PrintCurrentStack(ThreadState *thr, uptr pc) {
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  PrintStack(SymbolizeStack(trace));
}
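// Slow path used from TsanCheckFailed(): unwinds the current stack with the
// generic unwinder and reverses the frame order so it matches the
// oldest-frame-first convention used by tsan's StackTrace.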
void PrintCurrentStackSlow() {
#ifndef TSAN_GO
  __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
      sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
  ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(), 0, 0,
                 0, 0, false);
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace[i];
    ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
    ptrace->trace[ptrace->size - i - 1] = tmp;
  }
  StackTrace trace;
  trace.Init(ptrace->trace, ptrace->size);
  PrintStack(SymbolizeStack(trace));
#endif
}

}  // namespace __tsan