1 //===-- tsan_rtl_report.cc ------------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
12 //===----------------------------------------------------------------------===//
14 #include "sanitizer_common/sanitizer_libc.h"
15 #include "sanitizer_common/sanitizer_placement_new.h"
16 #include "sanitizer_common/sanitizer_stackdepot.h"
17 #include "sanitizer_common/sanitizer_common.h"
18 #include "sanitizer_common/sanitizer_stacktrace.h"
19 #include "tsan_platform.h"
21 #include "tsan_suppressions.h"
22 #include "tsan_symbolize.h"
23 #include "tsan_report.h"
24 #include "tsan_sync.h"
25 #include "tsan_mman.h"
26 #include "tsan_flags.h"
// Pull __sanitizer names (Printf, internal_* string helpers, StackDepot)
// into scope for this translation unit.
// NOTE(review): this extract is fragmentary — the `namespace __tsan {`
// opener that presumably precedes this directive is not visible here.
31 using namespace __sanitizer
; // NOLINT
// Forward declaration: converts a raw PC trace into a symbolized
// ReportStack list (defined below).
33 static ReportStack
*SymbolizeStack(const StackTrace
& trace
);
// CHECK-failure handler for the TSan runtime: prints the failing
// file/line/condition plus the two operand values, then dumps the current
// stack via the slow unwinder.
// NOTE(review): the tail of the parameter list (presumably `u64 v1, u64 v2`)
// and the terminating lines of the body (e.g. a Die() call and `}`) are
// missing from this extract — confirm against the full source.
35 void TsanCheckFailed(const char *file
, int line
, const char *cond
,
38 Printf("FATAL: ThreadSanitizer CHECK failed: "
39 "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
40 file
, line
, cond
, (uptr
)v1
, (uptr
)v2
);
// Slow unwind is acceptable here: we are about to terminate anyway.
41 PrintCurrentStackSlow();
45 // Can be overridden by an application/test to intercept reports.
// When TSAN_EXTERNAL_HOOKS is defined, the strong definition is expected to
// be supplied externally; otherwise the WEAK default below is used.
46 #ifdef TSAN_EXTERNAL_HOOKS
47 bool OnReport(const ReportDesc
*rep
, bool suppressed
);
// Default (weak) hook; the body is missing from this extract — presumably it
// just returns `suppressed` (and the matching #else/#endif is also missing).
// TODO(review): confirm against the full source.
49 SANITIZER_INTERFACE_ATTRIBUTE
50 bool WEAK
OnReport(const ReportDesc
*rep
, bool suppressed
) {
// Post-processes a symbolized stack for reporting:
//  - strips the "__interceptor_" prefix from interceptor frame names;
//  - strips flags()->strip_path_prefix from file paths;
//  - trims the bottom frames below main() / the internal thread start
//    routine / global-ctor init, so reports end at user code.
// NOTE(review): this extract is missing interior lines (e.g. the declaration
// of `pos`, the tracking of last_frame, null checks, and closing braces) —
// do not treat it as compilable as-is.
56 static void StackStripMain(ReportStack
*stack
) {
57 ReportStack
*last_frame
= 0;
58 ReportStack
*last_frame2
= 0;
59 const char *prefix
= "__interceptor_";
60 uptr prefix_len
= internal_strlen(prefix
);
61 const char *path_prefix
= flags()->strip_path_prefix
;
62 uptr path_prefix_len
= internal_strlen(path_prefix
);
// Walk the frame list, normalizing each frame in place.
64 for (ReportStack
*ent
= stack
; ent
; ent
= ent
->next
) {
65 if (ent
->func
&& 0 == internal_strncmp(ent
->func
, prefix
, prefix_len
))
66 ent
->func
+= prefix_len
;
// `pos` is assigned here but its declaration is on a line missing from this
// extract (presumably a `const char *pos` earlier in the function).
67 if (ent
->file
&& (pos
= internal_strstr(ent
->file
, path_prefix
)))
68 ent
->file
= pos
+ path_prefix_len
;
69 if (ent
->file
&& ent
->file
[0] == '.' && ent
->file
[1] == '/')
// Remember the frame above the last one, so we can cut the list there.
71 last_frame2
= last_frame
;
77 const char *last
= last_frame
->func
;
79 const char *last2
= last_frame2
->func
;
80 // Strip frame above 'main'
81 if (last2
&& 0 == internal_strcmp(last2
, "main")) {
82 last_frame2
->next
= 0;
83 // Strip our internal thread start routine.
84 } else if (last
&& 0 == internal_strcmp(last
, "__tsan_thread_start_func")) {
85 last_frame2
->next
= 0;
86 // Strip global ctors init.
87 } else if (last
&& 0 == internal_strcmp(last
, "__do_global_ctors_aux")) {
88 last_frame2
->next
= 0;
89 // If both are 0, then we probably just failed to symbolize.
90 } else if (last
|| last2
) {
91 // Ensure that we recovered stack completely. Trimmed stack
92 // can actually happen if we do not instrument some code,
93 // so it's only a debug print. However we must try hard to not miss it
95 DPrintf("Bottom stack frame of stack %zx is missed\n", stack
->pc
);
// Go-specific (presumably): the Go runtime's scheduler frame is stripped too.
// TODO(review): confirm the platform guard around this branch in full source.
98 if (last
&& 0 == internal_strcmp(last
, "schedunlock"))
99 last_frame2
->next
= 0;
// Symbolizes every PC in `trace` into a linked list of ReportStack frames,
// then strips runtime-internal bottom frames via StackStripMain().
// NOTE(review): the lines that link each symbolized entry into `stack` and
// the final `return stack;` are missing from this extract.
103 static ReportStack
*SymbolizeStack(const StackTrace
& trace
) {
106 ReportStack
*stack
= 0;
107 for (uptr si
= 0; si
< trace
.Size(); si
++) {
108 // We obtain the return address, that is, address of the next instruction,
109 // so offset it by 1 byte.
110 bool is_last
= (si
== trace
.Size() - 1);
111 ReportStack
*ent
= SymbolizeCode(trace
.Get(si
) - !is_last
);
113 ReportStack
*last
= ent
;
// Restore the original (return) address on the symbolized frame, undoing
// the -1 byte adjustment applied before symbolization.
115 last
->pc
+= !is_last
;
118 last
->pc
+= !is_last
;
122 StackStripMain(stack
);
// Begins building a report of the given type. Requires thread_mtx to be
// held by the caller (CheckLocked), allocates the ReportDesc from the
// internal allocator, and takes report_mtx so only one report is built and
// printed at a time (released in the destructor).
// NOTE(review): lines initializing ctx_ / rep_->typ are missing from this
// extract.
126 ScopedReport::ScopedReport(ReportType typ
) {
128 ctx_
->thread_mtx
.CheckLocked();
129 void *mem
= internal_alloc(MBlockReport
, sizeof(ReportDesc
));
130 rep_
= new(mem
) ReportDesc
;
132 ctx_
->report_mtx
.Lock();
// Releases the report lock taken in the constructor and destroys/frees the
// ReportDesc allocated there (RAII pairing).
135 ScopedReport::~ScopedReport() {
136 ctx_
->report_mtx
.Unlock();
137 DestroyAndFree(rep_
);
// Symbolizes `stack` and appends it to the report's stack list.
140 void ScopedReport::AddStack(const StackTrace
*stack
) {
141 ReportStack
**rs
= rep_
->stacks
.PushBack();
142 *rs
= SymbolizeStack(*stack
);
// Records one memory access (address, size, read/write, stack) in the
// report, together with the set of mutexes held at the time of the access.
// For each mutex in `mset` it tries to resolve the live SyncVar so the
// report can reference the mutex by its stable uid.
// NOTE(review): the declaration of `uid`, the unlock of the SyncVar, and the
// else-branch structure around orig line 168 are missing from this extract.
145 void ScopedReport::AddMemoryAccess(uptr addr
, Shadow s
,
146 const StackTrace
*stack
, const MutexSet
*mset
) {
147 void *mem
= internal_alloc(MBlockReportMop
, sizeof(ReportMop
));
148 ReportMop
*mop
= new(mem
) ReportMop
;
149 rep_
->mops
.PushBack(mop
);
// The shadow cell stores the offset/size/type of the access.
151 mop
->addr
= addr
+ s
.addr0();
152 mop
->size
= s
.size();
153 mop
->write
= s
.IsWrite();
154 mop
->stack
= SymbolizeStack(*stack
);
155 for (uptr i
= 0; i
< mset
->Size(); i
++) {
156 MutexSet::Desc d
= mset
->Get(i
);
// Split the mutex id into its address and uid components (`uid` is declared
// on a line missing from this extract).
158 uptr addr
= SyncVar::SplitId(d
.id
, &uid
);
159 SyncVar
*s
= ctx_
->synctab
.GetIfExistsAndLock(addr
, false);
160 // Check that the mutex is still alive.
161 // Another mutex can be created at the same address,
162 // so check uid as well.
163 if (s
&& s
->CheckId(uid
)) {
164 ReportMopMutex mtx
= {s
->uid
, d
.write
};
165 mop
->mset
.PushBack(mtx
);
// Fallback path (presumably when the SyncVar is gone): reference the mutex
// by its raw id instead of a resolved uid.
168 ReportMopMutex mtx
= {d
.id
, d
.write
};
169 mop
->mset
.PushBack(mtx
);
// Adds a thread's description (id, os pid, running state, name, parent,
// creation stack) to the report, deduplicating by thread id.
// NOTE(review): the early `return` in the dedup loop and the assignment of
// rt->id are on lines missing from this extract.
177 void ScopedReport::AddThread(const ThreadContext
*tctx
) {
// Skip if this thread is already described in the report.
178 for (uptr i
= 0; i
< rep_
->threads
.Size(); i
++) {
179 if (rep_
->threads
[i
]->id
== tctx
->tid
)
182 void *mem
= internal_alloc(MBlockReportThread
, sizeof(ReportThread
));
183 ReportThread
*rt
= new(mem
) ReportThread();
184 rep_
->threads
.PushBack(rt
);
186 rt
->pid
= tctx
->os_id
;
187 rt
->running
= (tctx
->status
== ThreadStatusRunning
);
// Duplicate the name: tctx may be destroyed before the report is printed.
188 rt
->name
= tctx
->name
? internal_strdup(tctx
->name
) : 0;
189 rt
->parent_tid
= tctx
->creation_tid
;
190 rt
->stack
= SymbolizeStack(tctx
->creation_stack
);
// Linear scan over the thread table for the context whose unique_id matches.
// Requires thread_mtx held. Returns 0 if not found (return statements are on
// lines missing from this extract).
194 static ThreadContext
*FindThread(int unique_id
) {
195 Context
*ctx
= CTX();
196 ctx
->thread_mtx
.CheckLocked();
197 for (unsigned i
= 0; i
< kMaxTid
; i
++) {
198 ThreadContext
*tctx
= ctx
->threads
[i
];
199 if (tctx
&& tctx
->unique_id
== unique_id
) {
// Determines whether `addr` lies within any running thread's stack or TLS
// region. On a hit, returns that thread's context and sets *is_stack
// accordingly (the assignments/returns inside the two range checks are on
// lines missing from this extract). Requires thread_mtx held.
206 ThreadContext
*IsThreadStackOrTls(uptr addr
, bool *is_stack
) {
207 Context
*ctx
= CTX();
208 ctx
->thread_mtx
.CheckLocked();
209 for (unsigned i
= 0; i
< kMaxTid
; i
++) {
210 ThreadContext
*tctx
= ctx
->threads
[i
];
// Only running threads have valid stack/TLS ranges to test against.
211 if (tctx
== 0 || tctx
->status
!= ThreadStatusRunning
)
213 ThreadState
*thr
= tctx
->thr
;
// Stack range check: [stk_addr, stk_addr + stk_size).
215 if (addr
>= thr
->stk_addr
&& addr
< thr
->stk_addr
+ thr
->stk_size
) {
// TLS range check: [tls_addr, tls_addr + tls_size).
219 if (addr
>= thr
->tls_addr
&& addr
< thr
->tls_addr
+ thr
->tls_size
) {
// Adds a live mutex (by SyncVar) to the report, deduplicating by uid.
// Records its creation stack; destroyed = false because the SyncVar still
// exists. NOTE(review): the early `return` in the dedup loop and the
// `rm->id = s->uid;` assignment are on lines missing from this extract.
228 void ScopedReport::AddMutex(const SyncVar
*s
) {
229 for (uptr i
= 0; i
< rep_
->mutexes
.Size(); i
++) {
230 if (rep_
->mutexes
[i
]->id
== s
->uid
)
233 void *mem
= internal_alloc(MBlockReportMutex
, sizeof(ReportMutex
));
234 ReportMutex
*rm
= new(mem
) ReportMutex();
235 rep_
->mutexes
.PushBack(rm
);
237 rm
->destroyed
= false;
238 rm
->stack
= SymbolizeStack(s
->creation_stack
);
// Adds a mutex known only by raw id — used when the SyncVar no longer
// exists, hence destroyed = true and no creation stack is available.
// NOTE(review): the early `return`, the `rm->id = id;` assignment, and the
// stack field initialization are on lines missing from this extract.
241 void ScopedReport::AddMutex(u64 id
) {
242 for (uptr i
= 0; i
< rep_
->mutexes
.Size(); i
++) {
243 if (rep_
->mutexes
[i
]->id
== id
)
246 void *mem
= internal_alloc(MBlockReportMutex
, sizeof(ReportMutex
));
247 ReportMutex
*rm
= new(mem
) ReportMutex();
248 rep_
->mutexes
.PushBack(rm
);
250 rm
->destroyed
= true;
// Classifies the racy address and attaches a location description to the
// report. Tried in order: file descriptor, heap block owned by the internal
// allocator, thread stack/TLS, then symbolized global data.
// NOTE(review): this extract is missing several lines — the declarations of
// fd/creat_tid/creat_stack/ssz/trace, early returns between the branches,
// and some field assignments. Do not treat it as compilable as-is.
254 void ScopedReport::AddLocation(uptr addr
, uptr size
) {
// --- Case 1: the address is (or aliases) a file descriptor. ---
261 if (FdLocation(addr
, &fd
, &creat_tid
, &creat_stack
)
262 || FdLocation(AlternativeAddress(addr
), &fd
, &creat_tid
, &creat_stack
)) {
263 void *mem
= internal_alloc(MBlockReportLoc
, sizeof(ReportLocation
));
264 ReportLocation
*loc
= new(mem
) ReportLocation();
265 rep_
->locs
.PushBack(loc
);
266 loc
->type
= ReportLocationFD
;
268 loc
->tid
= creat_tid
;
// Recover the fd creation stack from the stack depot and symbolize it.
270 const uptr
*stack
= StackDepotGet(creat_stack
, &ssz
);
273 trace
.Init(stack
, ssz
);
274 loc
->stack
= SymbolizeStack(trace
);
// Presumably also adds the creating thread to the report if found.
276 ThreadContext
*tctx
= FindThread(creat_tid
);
// --- Case 2: heap block owned by the internal allocator. ---
281 if (allocator()->PointerIsMine((void*)addr
)) {
282 MBlock
*b
= user_mblock(0, (void*)addr
);
283 ThreadContext
*tctx
= FindThread(b
->alloc_tid
);
284 void *mem
= internal_alloc(MBlockReportLoc
, sizeof(ReportLocation
));
285 ReportLocation
*loc
= new(mem
) ReportLocation();
286 rep_
->locs
.PushBack(loc
);
287 loc
->type
= ReportLocationHeap
;
288 loc
->addr
= (uptr
)allocator()->GetBlockBegin((void*)addr
);
// Prefer the live thread id; fall back to the recorded allocator tid.
290 loc
->tid
= tctx
? tctx
->tid
: b
->alloc_tid
;
// Recover and symbolize the allocation stack from the stack depot.
296 const uptr
*stack
= StackDepotGet(b
->alloc_stack_id
, &ssz
);
299 trace
.Init(stack
, ssz
);
300 loc
->stack
= SymbolizeStack(trace
);
// --- Case 3: the address is in some thread's stack or TLS. ---
306 bool is_stack
= false;
307 if (ThreadContext
*tctx
= IsThreadStackOrTls(addr
, &is_stack
)) {
308 void *mem
= internal_alloc(MBlockReportLoc
, sizeof(ReportLocation
));
309 ReportLocation
*loc
= new(mem
) ReportLocation();
310 rep_
->locs
.PushBack(loc
);
311 loc
->type
= is_stack
? ReportLocationStack
: ReportLocationTLS
;
312 loc
->tid
= tctx
->tid
;
// --- Case 4: fall back to symbolizing the address as global data. ---
315 ReportLocation
*loc
= SymbolizeData(addr
);
317 rep_
->locs
.PushBack(loc
);
// Attaches the stack at which the racing thread last slept (pthread_cond
// style "sleep" recorded as a depot stack id) to the report.
// NOTE(review): the declarations of `ssz` and `trace` are on lines missing
// from this extract.
324 void ScopedReport::AddSleep(u32 stack_id
) {
326 const uptr
*stack
= StackDepotGet(stack_id
, &ssz
);
329 trace
.Init(stack
, ssz
);
330 rep_
->sleep
= SymbolizeStack(trace
);
// Read-only accessor for the report under construction (the body —
// presumably `return rep_;` — is on a line missing from this extract).
335 const ReportDesc
*ScopedReport::GetReport() const {
// Reconstructs the stack trace (into *stk) and held-mutex set (into *mset)
// that thread `tid` had at the given epoch, by starting from the snapshot
// stored in the trace-part header and replaying events up to the epoch.
// NOTE(review): this extract is missing interior lines — the declaration of
// `trace`, the trace mutex locking, and the push/pop of `stack[pos]` inside
// the Mop/FuncEnter/FuncExit branches. Do not treat it as compilable as-is.
339 void RestoreStack(int tid
, const u64 epoch
, StackTrace
*stk
, MutexSet
*mset
) {
340 // This function restores stack trace and mutex set for the thread/epoch.
341 // It does so by getting stack trace and mutex set at the beginning of
342 // trace part, and then replaying the trace till the given epoch.
343 ThreadContext
*tctx
= CTX()->threads
[tid
];
// Pick the event trace: live threads own theirs; finished/dead threads keep
// a copy in dead_info (if it was preserved).
347 if (tctx
->status
== ThreadStatusRunning
) {
349 trace
= &tctx
->thr
->trace
;
350 } else if (tctx
->status
== ThreadStatusFinished
351 || tctx
->status
== ThreadStatusDead
) {
352 if (tctx
->dead_info
== 0)
354 trace
= &tctx
->dead_info
->trace
;
// Locate the trace part containing the epoch; if the part has been
// overwritten (epoch predates its epoch0) the stack cannot be restored.
359 const int partidx
= (epoch
/ kTracePartSize
) % TraceParts();
360 TraceHeader
* hdr
= &trace
->headers
[partidx
];
361 if (epoch
< hdr
->epoch0
)
363 const u64 epoch0
= RoundDown(epoch
, TraceSize());
364 const u64 eend
= epoch
% TraceSize();
365 const u64 ebegin
= RoundDown(eend
, kTracePartSize
);
366 DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
367 tid
, (uptr
)epoch
, (uptr
)ebegin
, (uptr
)eend
, partidx
);
368 InternalScopedBuffer
<uptr
> stack(1024); // FIXME: de-hardcode 1024
// Seed the reconstruction with the stack snapshot taken at part start.
369 for (uptr i
= 0; i
< hdr
->stack0
.Size(); i
++) {
370 stack
[i
] = hdr
->stack0
.Get(i
);
371 DPrintf2(" #%02lu: pc=%zx\n", i
, stack
[i
]);
375 uptr pos
= hdr
->stack0
.Size();
376 Event
*events
= (Event
*)GetThreadTrace(tid
);
// Replay events from part start to the target epoch. Each event packs a
// 3-bit type in the top bits and a 61-bit PC in the low bits.
377 for (uptr i
= ebegin
; i
<= eend
; i
++) {
378 Event ev
= events
[i
];
379 EventType typ
= (EventType
)(ev
>> 61);
380 uptr pc
= (uptr
)(ev
& ((1ull << 61) - 1));
381 DPrintf2(" %zu typ=%d pc=%zx\n", i
, typ
, pc
);
// Mop/FuncEnter/FuncExit maintain stack[] / pos (the update lines are
// missing from this extract); the lock events maintain the mutex set.
382 if (typ
== EventTypeMop
) {
384 } else if (typ
== EventTypeFuncEnter
) {
386 } else if (typ
== EventTypeFuncExit
) {
391 if (typ
== EventTypeLock
) {
392 mset
->Add(pc
, true, epoch0
+ i
);
393 } else if (typ
== EventTypeUnlock
) {
395 } else if (typ
== EventTypeRLock
) {
396 mset
->Add(pc
, false, epoch0
+ i
);
397 } else if (typ
== EventTypeRUnlock
) {
398 mset
->Del(pc
, false);
401 for (uptr j
= 0; j
<= pos
; j
++)
402 DPrintf2(" #%zu: %zx\n", j
, stack
[j
]);
// An all-zero reconstruction means we have nothing useful to report.
404 if (pos
== 0 && stack
[0] == 0)
407 stk
->Init(stack
.data(), pos
);
// Duplicate-report suppression: returns true (suppress) when the pair of
// racing stacks, or the racy address range, has already been reported —
// controlled by the suppress_equal_stacks / suppress_equal_addresses flags.
// On suppression it also re-registers the stack hash / address range.
// NOTE(review): the declaration of `hash` (an MD5 pair, presumably
// RacyStacks) and the return statements are on lines missing from this
// extract.
410 static bool HandleRacyStacks(ThreadState
*thr
, const StackTrace (&traces
)[2],
411 uptr addr_min
, uptr addr_max
) {
412 Context
*ctx
= CTX();
413 bool equal_stack
= false;
// Compare the MD5 of both racing stacks against previously seen pairs.
415 if (flags()->suppress_equal_stacks
) {
416 hash
.hash
[0] = md5_hash(traces
[0].Begin(), traces
[0].Size() * sizeof(uptr
));
417 hash
.hash
[1] = md5_hash(traces
[1].Begin(), traces
[1].Size() * sizeof(uptr
));
418 for (uptr i
= 0; i
< ctx
->racy_stacks
.Size(); i
++) {
419 if (hash
== ctx
->racy_stacks
[i
]) {
420 DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
426 bool equal_address
= false;
427 RacyAddress ra0
= {addr_min
, addr_max
};
// Any overlap between the new range and a recorded one counts as a dup.
428 if (flags()->suppress_equal_addresses
) {
429 for (uptr i
= 0; i
< ctx
->racy_addresses
.Size(); i
++) {
430 RacyAddress ra2
= ctx
->racy_addresses
[i
];
431 uptr maxbeg
= max(ra0
.addr_min
, ra2
.addr_min
);
432 uptr minend
= min(ra0
.addr_max
, ra2
.addr_max
);
433 if (maxbeg
< minend
) {
434 DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
435 equal_address
= true;
// When suppressing, record this occurrence so future dups also match.
440 if (equal_stack
|| equal_address
) {
442 ctx
->racy_stacks
.PushBack(hash
);
444 ctx
->racy_addresses
.PushBack(ra0
);
// After a report has been emitted, registers its stack-pair hash and address
// range so that subsequent identical races are suppressed by
// HandleRacyStacks(). NOTE(review): the declaration of `hash` is on a line
// missing from this extract.
450 static void AddRacyStacks(ThreadState
*thr
, const StackTrace (&traces
)[2],
451 uptr addr_min
, uptr addr_max
) {
452 Context
*ctx
= CTX();
453 if (flags()->suppress_equal_stacks
) {
455 hash
.hash
[0] = md5_hash(traces
[0].Begin(), traces
[0].Size() * sizeof(uptr
));
456 hash
.hash
[1] = md5_hash(traces
[1].Begin(), traces
[1].Size() * sizeof(uptr
));
457 ctx
->racy_stacks
.PushBack(hash
);
459 if (flags()->suppress_equal_addresses
) {
460 RacyAddress ra0
= {addr_min
, addr_max
};
461 ctx
->racy_addresses
.PushBack(ra0
);
// Final gate before printing a report: checks the user-supplied suppression
// rules against up to two stacks, records a FiredSuppression on a match,
// lets the OnReport() hook veto, and (on lines missing from this extract)
// presumably prints the report and returns whether it was emitted.
465 bool OutputReport(Context
*ctx
,
466 const ScopedReport
&srep
,
467 const ReportStack
*suppress_stack1
,
468 const ReportStack
*suppress_stack2
) {
469 const ReportDesc
*rep
= srep
.GetReport();
// IsSuppressed returns the matching PC (0 = no match); try both stacks.
470 uptr suppress_pc
= IsSuppressed(rep
->typ
, suppress_stack1
);
471 if (suppress_pc
== 0)
472 suppress_pc
= IsSuppressed(rep
->typ
, suppress_stack2
);
// Remember fired suppressions so cheaper pre-checks can skip future work.
473 if (suppress_pc
!= 0) {
474 FiredSuppression supp
= {srep
.GetReport()->typ
, suppress_pc
};
475 ctx
->fired_suppressions
.PushBack(supp
);
// Application/test hook may claim the report (see OnReport above).
477 if (OnReport(rep
, suppress_pc
!= 0))
// Cheap pre-check: returns whether any PC of `trace` matches a suppression
// that already fired for this report type, allowing the caller to bail out
// before doing expensive symbolization (the return statements are on lines
// missing from this extract).
484 bool IsFiredSuppression(Context
*ctx
,
485 const ScopedReport
&srep
,
486 const StackTrace
&trace
) {
487 for (uptr k
= 0; k
< ctx
->fired_suppressions
.Size(); k
++) {
// Only suppressions of the same report type apply.
488 if (ctx
->fired_suppressions
[k
].type
!= srep
.GetReport()->typ
)
490 for (uptr j
= 0; j
< trace
.Size(); j
++) {
491 if (trace
.Get(j
) == ctx
->fired_suppressions
[k
].pc
)
498 // On programs that use Java we see weird reports like:
499 // WARNING: ThreadSanitizer: data race (pid=22512)
500 // Read of size 8 at 0x7d2b00084318 by thread 100:
501 // #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
502 // #1 <null> <null>:0 (0x7f7ad9b40193)
503 // Previous write of size 8 at 0x7d2b00084318 by thread 105:
504 // #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
505 // #1 <null> <null>:0 (0x7f7ad9b42707)
// Heuristic filter for such reports: a race counts as "Java nonsense" when
// its access stacks consist only of an interceptor frame followed by an
// unsymbolizable frame. Matching reports also register a FiredSuppression.
// NOTE(review): the `if (frame == 0` heads of the two conditions (orig lines
// 510/519) and the returns are missing from this extract.
506 static bool IsJavaNonsense(const ReportDesc
*rep
) {
507 for (uptr i
= 0; i
< rep
->mops
.Size(); i
++) {
508 ReportMop
*mop
= rep
->mops
[i
];
509 ReportStack
*frame
= mop
->stack
;
// A frame with no func/file/line/module failed to symbolize entirely.
511 || (frame
->func
== 0 && frame
->file
== 0 && frame
->line
== 0
512 && frame
->module
== 0)) {
// Top frame must come from the interceptors to match the pattern above.
515 if (frame
!= 0 && frame
->file
!= 0
516 && (internal_strstr(frame
->file
, "tsan_interceptors.cc") ||
517 internal_strstr(frame
->file
, "sanitizer_common_interceptors.inc"))) {
520 || (frame
->func
== 0 && frame
->file
== 0 && frame
->line
== 0
521 && frame
->module
== 0)) {
// Suppress future occurrences of this PC for the same report type.
523 FiredSuppression supp
= {rep
->typ
, frame
->pc
};
524 CTX()->fired_suppressions
.PushBack(supp
);
// Top-level data-race reporting path: computes the racy address range,
// restores the second access's stack/mutex set from the trace, applies the
// suppression/dedup filters, assembles the ScopedReport (accesses, threads,
// location, sleep stack), and emits it via OutputReport().
// NOTE(review): this extract is missing interior lines — declarations of
// `freed`, `addr_min`/`addr_max`, `kMop`, the ScopedInRtl/CheckedMutex
// scaffolding, several early returns, and the AddThread calls. Do not treat
// it as compilable as-is.
533 void ReportRace(ThreadState
*thr
) {
534 if (!flags()->report_bugs
)
// Reporting from a signal handler is best-effort; warn the user.
538 if (thr
->in_signal_handler
)
539 Printf("ThreadSanitizer: printing report from signal handler."
540 " Can crash or hang.\n");
// Extract-and-clear the "freed" bit from the second access's shadow; it
// decides between a use-after-free and a plain race report below.
544 Shadow
s(thr
->racy_state
[1]);
545 freed
= s
.GetFreedAndReset();
546 thr
->racy_state
[1] = s
.raw();
549 uptr addr
= ShadowToMem((uptr
)thr
->racy_shadow_addr
);
// Compute the union [addr_min, addr_max) of both accesses' byte ranges.
553 uptr a0
= addr
+ Shadow(thr
->racy_state
[0]).addr0();
554 uptr a1
= addr
+ Shadow(thr
->racy_state
[1]).addr0();
555 uptr e0
= a0
+ Shadow(thr
->racy_state
[0]).size();
556 uptr e1
= a1
+ Shadow(thr
->racy_state
[1]).size();
557 addr_min
= min(a0
, a1
);
558 addr_max
= max(e0
, e1
);
// Races announced in advance by tests (ANNOTATE_EXPECT_RACE) are skipped.
559 if (IsExpectedReport(addr_min
, addr_max
- addr_min
))
563 Context
*ctx
= CTX();
564 Lock
l0(&ctx
->thread_mtx
);
566 ScopedReport
rep(freed
? ReportTypeUseAfterFree
: ReportTypeRace
);
// First access: current thread's live stack (cheap pre-suppression check
// runs before the expensive trace replay for the second access).
568 StackTrace traces
[kMop
];
569 const uptr toppc
= TraceTopPC(thr
);
570 traces
[0].ObtainCurrent(thr
, toppc
);
571 if (IsFiredSuppression(ctx
, rep
, traces
[0]))
// Second access: replay the other thread's trace at the recorded epoch.
573 InternalScopedBuffer
<MutexSet
> mset2(1);
574 new(mset2
.data()) MutexSet();
575 Shadow
s2(thr
->racy_state
[1]);
576 RestoreStack(s2
.tid(), s2
.epoch(), &traces
[1], mset2
.data());
// Dedup against previously reported stack pairs / address ranges.
578 if (HandleRacyStacks(thr
, traces
, addr_min
, addr_max
))
581 for (uptr i
= 0; i
< kMop
; i
++) {
582 Shadow
s(thr
->racy_state
[i
]);
583 rep
.AddMemoryAccess(addr
, s
, &traces
[i
],
584 i
== 0 ? &thr
->mset
: mset2
.data());
587 if (flags()->suppress_java
&& IsJavaNonsense(rep
.GetReport()))
// Describe the threads involved, when their epochs are still in range.
590 for (uptr i
= 0; i
< kMop
; i
++) {
591 FastState
s(thr
->racy_state
[i
]);
592 ThreadContext
*tctx
= ctx
->threads
[s
.tid()];
593 if (s
.epoch() < tctx
->epoch0
|| s
.epoch() > tctx
->epoch1
)
598 rep
.AddLocation(addr_min
, addr_max
- addr_min
);
// If the second access happened before the other thread's last recorded
// sleep, attach the sleep stack (cond-var false-negative diagnostics).
602 Shadow
s(thr
->racy_state
[1]);
603 if (s
.epoch() <= thr
->last_sleep_clock
.get(s
.tid()))
604 rep
.AddSleep(thr
->last_sleep_stack_id
);
608 if (!OutputReport(ctx
, rep
, rep
.GetReport()->mops
[0]->stack
,
609 rep
.GetReport()->mops
[1]->stack
))
// Only remember the race for dedup once it was actually reported.
612 AddRacyStacks(thr
, traces
, addr_min
, addr_max
);
// Prints the current thread's instrumented stack, symbolized, ending at
// `pc`. NOTE(review): the declaration of `trace` (presumably a local
// StackTrace) is on a line missing from this extract.
615 void PrintCurrentStack(ThreadState
*thr
, uptr pc
) {
617 trace
.ObtainCurrent(thr
, pc
);
618 PrintStack(SymbolizeStack(trace
));
// Prints the current stack using the slow frame-pointer/unwinder path from
// sanitizer_common instead of TSan's own shadow-stack — used when the
// shadow stack may be unreliable (e.g. CHECK failures; see TsanCheckFailed).
// The StackTrace is placement-new'ed from internal_alloc and intentionally
// not freed — acceptable since this runs on fatal paths.
// NOTE(review): the second SlowUnwindStack argument and the declaration of
// `trace` are on lines missing from this extract; presumably this is also
// compiled out for Go builds.
621 void PrintCurrentStackSlow() {
623 __sanitizer::StackTrace
*ptrace
= new(internal_alloc(MBlockStackTrace
,
624 sizeof(__sanitizer::StackTrace
))) __sanitizer::StackTrace
;
625 ptrace
->SlowUnwindStack(__sanitizer::StackTrace::GetCurrentPc(),
628 trace
.Init(ptrace
->trace
, ptrace
->size
);
629 PrintStack(SymbolizeStack(trace
));
633 } // namespace __tsan