//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == nullptr)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

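// Symbolizes each PC of a raw stack trace into a linked list of frames.
// Collected PCs are return addresses, so non-external PCs (kExternalPCBit
// clear) are shifted back to the call instruction before symbolization,
// while the original PC is kept in the frames for the report.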
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction (the call itself).
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

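// ScopedReport accumulates all parts of a single report (memory accesses,
// threads, mutexes, locations) while the thread registry is locked, and holds
// the report mutexes for its whole lifetime so reports are emitted atomically.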
ScopedReport::ScopedReport(ReportType typ, uptr tag) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                   StackTrace stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->workerthread = tctx->workerthread;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

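// Helpers that map a unique thread id or an address back to a ThreadContext.
// All of them require the thread registry to be locked by the caller.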
#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void *)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

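// Mutex ids used in MutexSet pack both the mutex address and a uid; the uid
// lets us tell whether the original mutex is still alive or has been destroyed
// and possibly replaced by another mutex at the same address.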
u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->GetId();
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  int creat_tid = kInvalidTid;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

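// Each trace event packs the event type into the bits above kEventPCBits and
// the PC (or, for mutex events, the mutex id) into the low kEventPCBits bits;
// RestoreStack below decodes events this way while replaying the trace.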
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack(MBlockReportStack);
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> kEventPCBits);
    uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
  ExtractTagFromStack(stk, tag);
}

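// Report deduplication: a race is dropped if an identical pair of stacks
// (compared by md5 hash) or an overlapping address range was already reported,
// as controlled by the suppress_equal_stacks/suppress_equal_addresses flags.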
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (flags()->suppress_equal_stacks) {
      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
        if (hash == ctx->racy_stacks[i]) {
          VPrintf(2,
              "ThreadSanitizer: suppressing report as doubled (stack)\n");
          equal_stack = true;
          break;
        }
      }
    }
    if (flags()->suppress_equal_addresses) {
      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
        RacyAddress ra2 = ctx->racy_addresses[i];
        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
        uptr minend = min(ra0.addr_max, ra2.addr_max);
        if (maxbeg < minend) {
          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
          equal_address = true;
          break;
        }
      }
    }
  }
  if (!equal_stack && !equal_address)
    return false;
  if (!equal_stack) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_stacks.PushBack(hash);
  }
  if (!equal_address) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_addresses.PushBack(ra0);
  }
  return true;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  Lock lock(&ctx->racy_mtx);
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

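// Matches the assembled report against user suppressions, runs the OnReport
// and __tsan_on_report hooks, and prints it. Returns true if the report was
// actually emitted.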
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

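// Checks whether a suppression has already fired for this report type at one
// of the trace PCs (or at the given address in the overload below), so the
// race can be skipped early without assembling a full report.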
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

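// Decides whether a race should still be reported when report_atomic_races is
// disabled: races not involving atomics are always reported, and so are races
// between an atomic access and a free.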
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  uptr tags[kMop] = {kExternalTagNone};
  uptr toppc = TraceTopPC(thr);
  if (toppc >> kEventPCBits) {
    // This is a work-around for a known issue.
    // The scenario where this happens is rather elaborate and requires
    // an instrumented __sanitizer_report_error_summary callback and
    // a __tsan_symbolize_external callback and a race during a range memory
    // access larger than 8 bytes. MemoryAccessRange adds the current PC to
    // the trace and starts processing memory accesses. A first memory access
    // triggers a race, we report it and call the instrumented
    // __sanitizer_report_error_summary, which adds more stuff to the trace
    // since it is instrumented. Then a second memory access in
    // MemoryAccessRange also triggers a race and we get here and call
    // TraceTopPC to get the current PC, however now it contains some unrelated
    // events from the callback. Most likely, TraceTopPC will now return an
    // EventTypeFuncExit event. Later we subtract 1 from it (in
    // GetPreviousInstructionPc) and the resulting PC has kExternalPCBit set,
    // so we pass it to __tsan_symbolize_external. __tsan_symbolize_external is
    // within its rights to crash since the PC is completely bogus.
    // test/tsan/double_race.cc contains a test case for this.
    toppc = 0;
  }
  ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on stack.
  Vector<u64> mset_buffer(MBlockScopedBuf);
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ, tag);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, tags[i], s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// that __sanitizer_print_stack_trace exists in the actual unwound stack, but
// a tail-call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail-call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
  // Reverse the frames returned by the unwinder before symbolizing them.
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"