//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
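// Illustrative example (not part of this file): a test or an embedding
// application can intercept reports by providing a strong definition of this
// hook, e.g.
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     return true;  // returning true makes OutputReport() drop the report
//   }
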
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;
  const char *last = last_frame->info.function;
#ifndef TSAN_GO
  const char *last2 = last_frame2->info.function;
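  // Illustrative example (not in the original source): for a symbolized stack
  // that ends [..., main, __libc_start_main], last2 is "main" and last is
  // "__libc_start_main", so the first branch below unlinks the libc frame.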
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame2->next = 0;
  (void)last;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
#ifndef TSAN_GO
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
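    // Illustrative example (not in the original source): if a call instruction
    // at 0x400100 pushes the return address 0x400105, symbolizing 0x400104
    // (inside the call instruction) attributes the frame to the call site's
    // source line rather than to the statement after it.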
    const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);
#else
    // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
    uptr pc1 = pc;
    if (si != trace.size - 1)
      pc1 -= 1;
#endif
    ReportStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}

ScopedReport::ScopedReport(ReportType typ) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                                   const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
      return tctx;
    }
  }
  return 0;
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset) {
  // This function restores the stack trace and mutex set for the thread/epoch.
  // It does so by getting the stack trace and mutex set at the beginning of
  // the trace part, and then replaying the trace till the given epoch.
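  // Illustrative note (not in the original source): each trace Event is a
  // 64-bit word whose top 3 bits encode the EventType and whose low 61 bits
  // carry a PC or address, i.e. roughly
  //   ev = ((u64)typ << 61) | (pc & ((1ull << 61) - 1));
  // The replay loop below decodes events with exactly this split.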
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == 0)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(kShadowStackSize);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}

static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

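// Illustrative note (not in the original source): OutputReport() below matches
// each stack in the report against the user suppression file (loaded via
// TSAN_OPTIONS=suppressions=<file>); for example, an entry such as
//   race:MyRacyFunction
// suppresses data-race reports whose stacks contain a matching frame, and the
// match is remembered in ctx->fired_suppressions so that repeats are cheap.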
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  uptr suppress_pc = 0;
  for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (suppress_pc != 0) {
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, suppress_pc != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed)
      return false;
  }
  PrintReport(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    internal__exit(flags()->exitcode);
  return true;
}

bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
                        StackTrace trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc) {
        if (s->supp)
          s->supp->hit_count++;
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx,
                               const ScopedReport &srep,
                               uptr addr) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc) {
      if (s->supp)
        s->supp->hit_count++;
      return true;
    }
  }
  return false;
}

bool FrameIsInternal(const ReportStack *frame) {
  if (frame == 0)
    return false;
  const char *file = frame->info.file;
  return file != 0 &&
         (internal_strstr(file, "tsan_interceptors.cc") ||
          internal_strstr(file, "sanitizer_common_interceptors.inc") ||
          internal_strstr(file, "tsan_interface_"));
}

static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
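  // Illustrative note (not in the original source): the two racy accesses may
  // touch different offsets/sizes within the same 8-byte shadow cell, so the
  // block below reports the union of the two byte ranges; e.g. a 4-byte write
  // at addr+0 racing with an 8-byte read at addr+0 yields
  // [addr_min, addr_max) == [addr, addr + 8).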
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  if (IsFiredSuppression(ctx, rep, addr))
    return;
  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  ObtainCurrentStack(thr, toppc, &traces[0]);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  if (IsFiredSuppression(ctx, rep, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

  {
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

void PrintCurrentStackSlow(uptr pc) {
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
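  // Note (added for clarity, not in the original source): Unwind() records the
  // innermost frame first, while SymbolizeStack() builds its report list by
  // prepending entries, so the buffer is reversed in place below to keep the
  // printed report innermost-frame-first.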
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"