1 //===-- tsan_rtl_report.cc ------------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
12 //===----------------------------------------------------------------------===//
14 #include "sanitizer_common/sanitizer_libc.h"
15 #include "sanitizer_common/sanitizer_placement_new.h"
16 #include "sanitizer_common/sanitizer_stackdepot.h"
17 #include "sanitizer_common/sanitizer_common.h"
18 #include "sanitizer_common/sanitizer_stacktrace.h"
19 #include "tsan_platform.h"
21 #include "tsan_suppressions.h"
22 #include "tsan_symbolize.h"
23 #include "tsan_report.h"
24 #include "tsan_sync.h"
25 #include "tsan_mman.h"
26 #include "tsan_flags.h"
using namespace __sanitizer;  // NOLINT

// NOTE(review): the 'namespace __tsan {' opener falls into a numbering gap in
// this chunk; the definitions below presumably live inside it -- confirm
// against the full file.

// Forward declaration: converts a raw StackTrace into a symbolized
// ReportStack list (defined later in this file).
static ReportStack *SymbolizeStack(const StackTrace& trace);
// Handler for failed internal CHECKs: prints the failing file/line/condition
// with both operand values, then dumps the current call stack via the slow
// unwinder (the fast shadow stack may itself be broken at this point).
// NOTE(review): the tail of the parameter list (the v1/v2 arguments used by
// the Printf below) and the function's closing lines fall into a numbering
// gap in this chunk -- restore from the full file.
void TsanCheckFailed(const char *file, int line, const char *cond,
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow();
// Can be overriden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
// External build: the hook is supplied by the embedding application/test.
bool OnReport(const ReportDesc *rep, bool suppressed);
// Weak default definition (overridable at link time).
// NOTE(review): the '#else' separating the declaration from this definition,
// the function body/return, and '#endif' fall into numbering gaps in this
// chunk -- restore from the full file.
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
// Post-processes a symbolized stack for presentation: strips the
// "__interceptor_" prefix from function names, strips the configured path
// prefix and leading "./" from file names, and truncates the frames below
// main()/the runtime's thread-start routine so reports end at user code.
// NOTE(review): several interior lines fall into numbering gaps in this
// chunk: the declaration of 'pos', the body of the "./" strip, the
// 'last_frame = ent;' update, the loop's closing brace, the null checks on
// last_frame/last_frame2, and most closing braces -- restore from the full
// file before compiling.
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;   // bottom-most frame seen so far
  ReportStack *last_frame2 = 0;  // frame above the bottom-most one
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    // Show the intercepted function's real name, not the interceptor's.
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    // Strip the user-configured path prefix from file names.
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    // Strip a leading "./" from file names.
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
    last_frame2 = last_frame;
  // Names of the two bottom-most frames (may be 0 if unsymbolized).
  const char *last = last_frame->func;
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered stack completely. Trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  // Also strip a "schedunlock" bottom frame.
  if (last && 0 == internal_strcmp(last, "schedunlock"))
    last_frame2->next = 0;
// Symbolizes every PC of 'trace' into a linked ReportStack and strips
// runtime-internal bottom frames via StackStripMain().
// NOTE(review): interior lines fall into numbering gaps in this chunk: the
// linking of 'ent' into 'stack', the walk from 'ent' to its last inlined
// frame (the duplicated 'last->pc += !is_last;' lines suggest two code paths
// here), the loop's closing brace, and the return statement.
static ReportStack *SymbolizeStack(const StackTrace& trace) {
  ReportStack *stack = 0;  // head of the resulting frame list
  for (uptr si = 0; si < trace.Size(); si++) {
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
    bool is_last = (si == trace.Size() - 1);
    ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last);
    ReportStack *last = ent;
    // Restore the original return address on the symbolized frame.
    last->pc += !is_last;
    last->pc += !is_last;
  StackStripMain(stack);
// Allocates the ReportDesc for a new report and takes the global report
// mutex (released in the destructor). Requires the thread registry to be
// locked by the caller.
// NOTE(review): the initialization of 'ctx_', the storing of 'typ' into the
// report, and the closing brace fall into numbering gaps in this chunk.
ScopedReport::ScopedReport(ReportType typ) {
  ctx_->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  ctx_->report_mtx.Lock();
// Releases the global report mutex taken in the constructor and destroys
// the report allocated there.
// NOTE(review): the closing brace falls into a numbering gap in this chunk.
ScopedReport::~ScopedReport() {
  ctx_->report_mtx.Unlock();
  DestroyAndFree(rep_);
// Symbolizes 'stack' and appends it to the report's list of stacks.
// NOTE(review): the closing brace falls into a numbering gap in this chunk.
void ScopedReport::AddStack(const StackTrace *stack) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(*stack);
// Records one racy memory access in the report: absolute address, size,
// read/write and atomicity flags (decoded from the shadow value 's'), the
// symbolized access stack, and the set of mutexes held at access time.
// NOTE(review): interior lines fall into numbering gaps in this chunk: the
// declaration of 'uid', the 'mop->tid' assignment, the else-branch header
// pairing with the CheckId test, the unlock of the SyncVar, and closing
// braces -- restore from the full file.
void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
    const StackTrace *stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  // Decode the access parameters out of the shadow word.
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(*stack);
  // Translate each held mutex into a report entry.
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    uptr addr = SyncVar::SplitId(d.id, &uid);
    SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
    // Check that the mutex is still alive.
    // Another mutex can be created at the same address,
    // so check uid as well.
    if (s && s->CheckId(uid)) {
      ReportMopMutex mtx = {s->uid, d.write};
      mop->mset.PushBack(mtx);
      // Fallback path: report the raw mutex id when the mutex is gone.
      ReportMopMutex mtx = {d.id, d.write};
      mop->mset.PushBack(mtx);
// Adds a thread description to the report (deduplicated by tid), including
// its id, OS pid, run state, name, parent tid, and symbolized creation
// stack.
// NOTE(review): interior lines fall into numbering gaps in this chunk: the
// dedup early-return body, the 'rt->id' assignment, the conditional split
// between the direct 'creation_stack' path and the stack-depot
// 'creation_stack_id' path (both appear below -- presumably alternative
// build configurations; confirm), the declarations of 'ssz'/'trace', and
// closing braces.
void ScopedReport::AddThread(const ThreadContext *tctx) {
  // Skip threads already present in the report.
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  // Own a copy of the name; the context's buffer may be reused.
  rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
  rt->parent_tid = tctx->parent_tid;
  rt->stack = SymbolizeStack(tctx->creation_stack);
  // Alternative path: fetch the creation stack from the stack depot.
  const uptr *stack = StackDepotGet(tctx->creation_stack_id, &ssz);
  trace.Init(stack, ssz);
  rt->stack = SymbolizeStack(trace);
// Linear scan of the (already locked) thread registry for the context with
// the given unique id; unique ids distinguish reused tids.
// NOTE(review): the 'return tctx;' body and the not-found return fall into
// a numbering gap in this chunk.
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
// Direct lookup of a thread context by tid in the (already locked) registry.
// NOTE(review): the closing brace falls into a numbering gap in this chunk.
static ThreadContext *FindThreadByTidLocked(int tid) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->GetThreadLocked(tid));
// Registry-iteration callback: returns whether 'arg' (an address, cast to
// uptr) falls inside the stack or TLS range of the given running thread.
// NOTE(review): the early-return body for non-running threads falls into a
// numbering gap in this chunk.
static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  // Only running threads have valid stack/TLS ranges.
  if (tctx->status != ThreadStatusRunning)
  ThreadState *thr = tctx->thr;
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
// Finds the running thread whose stack or TLS region contains 'addr' and
// reports which of the two it was via *is_stack.
// NOTE(review): the tail of the FindThreadContextLocked argument list, the
// null check on 'tctx', and the return statement fall into numbering gaps
// in this chunk.
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
  ThreadState *thr = tctx->thr;
  // Distinguish a stack hit from a TLS hit.
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
// Adds a live mutex to the report (deduplicated by uid), with its symbolized
// creation stack fetched from the stack depot.
// NOTE(review): interior lines fall into numbering gaps in this chunk: the
// dedup early-return body, the 'rm->id' assignment, the declarations of
// 'ssz'/'trace', and closing braces.
void ScopedReport::AddMutex(const SyncVar *s) {
  // Skip mutexes already present in the report.
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->destroyed = false;
  const uptr *stack = StackDepotGet(s->creation_stack_id, &ssz);
  trace.Init(stack, ssz);
  rm->stack = SymbolizeStack(trace);
// Adds an already-destroyed mutex known only by its id (no SyncVar, hence no
// creation stack); marked destroyed in the report.
// NOTE(review): the dedup early-return body, the 'rm->id' assignment, and
// closing braces fall into numbering gaps in this chunk.
void ScopedReport::AddMutex(u64 id) {
  // Skip mutexes already present in the report.
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->destroyed = true;
// Classifies what [addr, addr+size) is -- a file descriptor, a heap block,
// a thread stack/TLS region, or (fallback) global data -- and appends the
// matching ReportLocation to the report.
// NOTE(review): many interior lines fall into numbering gaps in this chunk:
// the declarations of fd/creat_tid/creat_stack/ssz/trace, the 'loc->fd'
// assignment, AddThread calls for the owning threads, the early returns
// after each branch, and closing braces -- restore from the full file.
void ScopedReport::AddLocation(uptr addr, uptr size) {
  // Is it a file descriptor? (Also probe the alternative address mapping.)
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationFD;
    loc->tid = creat_tid;
    // Symbolize the fd's creation stack from the stack depot.
    const uptr *stack = StackDepotGet(creat_stack, &ssz);
    trace.Init(stack, ssz);
    loc->stack = SymbolizeStack(trace);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
  // Is it a heap block owned by the runtime allocator?
  if (allocator()->PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(0, (void*)addr);
    ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->Size();
    // Prefer the live thread context's tid; fall back to the block's tid.
    loc->tid = tctx ? tctx->tid : b->Tid();
    // Symbolize the allocation stack from the stack depot.
    const uptr *stack = StackDepotGet(b->StackId(), &ssz);
    trace.Init(stack, ssz);
    loc->stack = SymbolizeStack(trace);
  // Is it inside some thread's stack or TLS?
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
  // Fallback: try to symbolize the address as global data.
  ReportLocation *loc = SymbolizeData(addr);
  rep_->locs.PushBack(loc);
// Attaches the stack identified by 'stack_id' (from the stack depot) to the
// report's sleep slot.
// NOTE(review): the declarations of 'ssz'/'trace' and the closing brace fall
// into numbering gaps in this chunk.
void ScopedReport::AddSleep(u32 stack_id) {
  const uptr *stack = StackDepotGet(stack_id, &ssz);
  trace.Init(stack, ssz);
  rep_->sleep = SymbolizeStack(trace);
// Read-only accessor for the report accumulated so far.
// NOTE(review): the body (presumably 'return rep_;') and closing brace fall
// into a numbering gap in this chunk.
const ReportDesc *ScopedReport::GetReport() const {
// Reconstructs the stack trace (*stk) and held-mutex set (*mset) of thread
// 'tid' as they were at the given 'epoch', by replaying the thread's event
// trace.
// NOTE(review): interior lines fall into numbering gaps in this chunk: the
// early-return bodies, the bodies of the Mop/FuncEnter/FuncExit replay
// branches (which maintain 'pos'/'stack'), loop closing braces, and the
// early return for an empty result -- restore from the full file.
void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext *>(
      ctx->thread_registry->GetThreadLocked(tid));
  // Only these thread states have a replayable trace.
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
  Trace* trace = ThreadTrace(tctx->tid);
  // Locate the trace part containing 'epoch'.
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  // The part was overwritten (wrapped) past the requested epoch.
  if (epoch < hdr->epoch0)
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(1024);  // FIXME: de-hardcode 1024
  // Seed with the stack snapshot taken at the beginning of the part.
  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
    stack[i] = hdr->stack0.Get(i);
    DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
  uptr pos = hdr->stack0.Size();
  Event *events = (Event*)GetThreadTrace(tid);
  // Replay events from the part start up to and including 'epoch'.
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);       // top 3 bits: event type
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));   // low 61 bits: pc/payload
    DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
    } else if (typ == EventTypeFuncEnter) {
    } else if (typ == EventTypeFuncExit) {
      // Mutex events update the restored mutex set with the acquisition
      // epoch (epoch0 + i).
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
  for (uptr j = 0; j <= pos; j++)
    DPrintf2(" #%zu: %zx\n", j, stack[j]);
  // Nothing was restored.
  if (pos == 0 && stack[0] == 0)
  stk->Init(stack.data(), pos);
// Duplicate-race filter: returns true (suppress the report) when an
// equivalent race was already reported -- either the same pair of stacks
// (compared via md5 fingerprints) or an overlapping address range.
// NOTE(review): interior lines fall into numbering gaps in this chunk: the
// declaration of 'hash', the 'equal_stack = true'/break statements, the
// return statements, and closing braces.
static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  bool equal_stack = false;
  if (flags()->suppress_equal_stacks) {
    // Fingerprint both stacks and compare with previously seen pairs.
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    // Overlapping [addr_min, addr_max) ranges count as the same race.
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
  if (equal_stack || equal_address) {
    // Re-record so future duplicates keep matching.
    ctx->racy_stacks.PushBack(hash);
    ctx->racy_addresses.PushBack(ra0);
// Records this race's stack-pair fingerprint and address range so that later
// equivalent races can be suppressed (see HandleRacyStacks).
// NOTE(review): the declaration of 'hash' and the closing braces fall into
// numbering gaps in this chunk.
static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
// Final gate before a report is emitted: checks the two candidate stacks
// against user suppressions, records any hit as a fired suppression, and
// gives the OnReport hook a chance to intercept.
// NOTE(review): the tail of this function (the branch bodies, the actual
// report printing, and the return statements) falls into numbering gaps in
// this chunk -- restore from the full file.
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1,
                  const ReportStack *suppress_stack2) {
  // Record the time of the last symbolization.
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  // A nonzero pc means a suppression matched one of the two stacks.
  uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1);
  if (suppress_pc == 0)
    suppress_pc = IsSuppressed(rep->typ, suppress_stack2);
  if (suppress_pc != 0) {
    // Remember the hit so IsFiredSuppression can match it cheaply later.
    FiredSuppression supp = {srep.GetReport()->typ, suppress_pc};
    ctx->fired_suppressions.PushBack(supp);
  // Let an application/test hook intercept or suppress the report.
  if (OnReport(rep, suppress_pc != 0))
// Returns whether any previously fired suppression of the same report type
// matches a PC in 'trace' -- used to cheaply drop repeats of a suppressed
// report.
// NOTE(review): the continue/return statements and closing braces fall into
// numbering gaps in this chunk.
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.Size(); k++) {
    // Only suppressions of the same report type can match.
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
    for (uptr j = 0; j < trace.Size(); j++) {
      if (trace.Get(j) == ctx->fired_suppressions[k].pc)
528 bool FrameIsInternal(const ReportStack
*frame
) {
529 return frame
!= 0 && frame
->file
!= 0
530 && (internal_strstr(frame
->file
, "tsan_interceptors.cc") ||
531 internal_strstr(frame
->file
, "sanitizer_common_interceptors.inc") ||
532 internal_strstr(frame
->file
, "tsan_interface_"));
// On programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
// Heuristic filter for such reports: scan each memory access's stack for
// the pattern "interceptor frame followed by an unsymbolized frame" and
// record a fired suppression when it matches.
// NOTE(review): interior lines fall into numbering gaps in this chunk: the
// heads of the two 'if' conditions whose '||' continuations appear below,
// the intervening frame advances, the return statements, and closing braces
// -- restore from the full file.
static bool IsJavaNonsense(const ReportDesc *rep) {
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    // ...frame is entirely unsymbolized (no func/file/line/module)...
        || (frame->func == 0 && frame->file == 0 && frame->line == 0
            && frame->module == 0)) {
    if (FrameIsInternal(frame)) {
        || (frame->func == 0 && frame->file == 0 && frame->line == 0
            && frame->module == 0)) {
      // Remember as a fired suppression so repeats are dropped cheaply.
      FiredSuppression supp = {rep->typ, frame->pc};
      CTX()->fired_suppressions.PushBack(supp);
// Determines whether this race is between an atomic access and a free();
// callers use this to report such races even when report_atomic_races is
// disabled.
// NOTE(review): the return statements for each condition fall into numbering
// gaps in this chunk.
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  // At most one of the two accesses can be atomic.
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
  if (s0.IsAtomic() && s1.IsFreed())
  if (s1.IsAtomic() && thr->is_freeing)
// Entry point for reporting a detected data race on 'thr': classifies the
// report, reconstructs both access stacks and mutex sets, applies
// duplicate/suppression filters, describes the racy location, and emits the
// report.
// NOTE(review): this function is heavily truncated in this chunk (many
// numbering gaps): early-return bodies, local declarations ('freed',
// 'addr_min'/'addr_max', scoping braces around the two 'Shadow s(...)'
// locals, the 'else if' head before the use-after-free classification),
// AddThread/AddMutex calls, and closing braces are missing -- restore from
// the full file.
void ReportRace(ThreadState *thr) {
  if (!flags()->report_bugs)
  // Races involving atomics are reported only on request, except for
  // atomic-vs-free races.
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
  if (thr->in_signal_handler)
    Printf("ThreadSanitizer: printing report from signal handler."
           " Can crash or hang.\n");
  // Extract (and clear) the freed marker from the second racy shadow value.
  Shadow s(thr->racy_state[1]);
  freed = s.GetFreedAndReset();
  thr->racy_state[1] = s.raw();
  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  // Compute the union [addr_min, addr_max) of the two accesses.
  uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
  uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
  uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
  uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
  addr_min = min(a0, a1);
  addr_max = max(e0, e1);
  // Tests can register expected races; skip them.
  if (IsExpectedReport(addr_min, addr_max - addr_min))
  Context *ctx = CTX();
  ThreadRegistryLock l0(ctx->thread_registry);
  // Classify the report type.
  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  // Stack of the current (first) access.
  StackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
  // Restore the other access's stack and mutex set from its trace.
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  // Drop duplicates of already-reported races.
  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
  // Add the two involved threads, but only if the racy epoch falls within
  // the lifetime recorded in the thread's context.
  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
  rep.AddLocation(addr_min, addr_max - addr_min);
  // Sleep hint: the other access happened before the last sleep-based sync.
  Shadow s(thr->racy_state[1]);
  if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
    rep.AddSleep(thr->last_sleep_stack_id);
  if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
                    rep.GetReport()->mops[1]->stack))
  // Remember this race so equivalent future races are suppressed.
  AddRacyStacks(thr, traces, addr_min, addr_max);
// Prints the current (instrumented, shadow-stack based) call stack of 'thr',
// with 'pc' as the topmost frame.
// NOTE(review): the declaration of 'trace' and the closing brace fall into
// numbering gaps in this chunk.
void PrintCurrentStack(ThreadState *thr, uptr pc) {
  trace.ObtainCurrent(thr, pc);
  PrintStack(SymbolizeStack(trace));
// Prints the current stack using the slow, unwinder-based path from
// sanitizer_common -- used by TsanCheckFailed, where the fast shadow stack
// may itself be broken.
// NOTE(review): the trailing arguments of SlowUnwindStack, the declaration
// of 'trace', and the closing brace fall into numbering gaps in this chunk.
void PrintCurrentStackSlow() {
  // Heap-allocated via internal_alloc + placement new (no C++ runtime here).
  __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
      sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
  ptrace->SlowUnwindStack(__sanitizer::StackTrace::GetCurrentPc(),
  trace.Init(ptrace->trace, ptrace->size);
  PrintStack(SymbolizeStack(trace));
692 } // namespace __tsan