//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
12 #include "sanitizer_common/sanitizer_libc.h"
13 #include "sanitizer_common/sanitizer_placement_new.h"
14 #include "sanitizer_common/sanitizer_stackdepot.h"
15 #include "sanitizer_common/sanitizer_common.h"
16 #include "sanitizer_common/sanitizer_stacktrace.h"
17 #include "tsan_platform.h"
19 #include "tsan_suppressions.h"
20 #include "tsan_symbolize.h"
21 #include "tsan_report.h"
22 #include "tsan_sync.h"
23 #include "tsan_mman.h"
24 #include "tsan_flags.h"
29 using namespace __sanitizer
; // NOLINT
31 static ReportStack
*SymbolizeStack(const StackTrace
& trace
);
33 void TsanCheckFailed(const char *file
, int line
, const char *cond
,
35 // There is high probability that interceptors will check-fail as well,
36 // on the other hand there is no sense in processing interceptors
37 // since we are going to die soon.
38 ScopedIgnoreInterceptors ignore
;
39 Printf("FATAL: ThreadSanitizer CHECK failed: "
40 "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
41 file
, line
, cond
, (uptr
)v1
, (uptr
)v2
);
42 PrintCurrentStackSlow();
46 // Can be overriden by an application/test to intercept reports.
47 #ifdef TSAN_EXTERNAL_HOOKS
48 bool OnReport(const ReportDesc
*rep
, bool suppressed
);
50 SANITIZER_INTERFACE_ATTRIBUTE
51 bool WEAK
OnReport(const ReportDesc
*rep
, bool suppressed
) {
57 static void StackStripMain(ReportStack
*stack
) {
58 ReportStack
*last_frame
= 0;
59 ReportStack
*last_frame2
= 0;
60 const char *prefix
= "__interceptor_";
61 uptr prefix_len
= internal_strlen(prefix
);
62 const char *path_prefix
= flags()->strip_path_prefix
;
63 uptr path_prefix_len
= internal_strlen(path_prefix
);
65 for (ReportStack
*ent
= stack
; ent
; ent
= ent
->next
) {
66 if (ent
->func
&& 0 == internal_strncmp(ent
->func
, prefix
, prefix_len
))
67 ent
->func
+= prefix_len
;
68 if (ent
->file
&& (pos
= internal_strstr(ent
->file
, path_prefix
)))
69 ent
->file
= pos
+ path_prefix_len
;
70 if (ent
->file
&& ent
->file
[0] == '.' && ent
->file
[1] == '/')
72 last_frame2
= last_frame
;
78 const char *last
= last_frame
->func
;
80 const char *last2
= last_frame2
->func
;
81 // Strip frame above 'main'
82 if (last2
&& 0 == internal_strcmp(last2
, "main")) {
83 last_frame2
->next
= 0;
84 // Strip our internal thread start routine.
85 } else if (last
&& 0 == internal_strcmp(last
, "__tsan_thread_start_func")) {
86 last_frame2
->next
= 0;
87 // Strip global ctors init.
88 } else if (last
&& 0 == internal_strcmp(last
, "__do_global_ctors_aux")) {
89 last_frame2
->next
= 0;
90 // If both are 0, then we probably just failed to symbolize.
91 } else if (last
|| last2
) {
92 // Ensure that we recovered stack completely. Trimmed stack
93 // can actually happen if we do not instrument some code,
94 // so it's only a debug print. However we must try hard to not miss it
96 DPrintf("Bottom stack frame of stack %zx is missed\n", stack
->pc
);
99 // The last frame always point into runtime (gosched0, goexit0, runtime.main).
100 last_frame2
->next
= 0;
105 ReportStack
*SymbolizeStackId(u32 stack_id
) {
109 const uptr
*stack
= StackDepotGet(stack_id
, &ssz
);
113 trace
.Init(stack
, ssz
);
114 return SymbolizeStack(trace
);
117 static ReportStack
*SymbolizeStack(const StackTrace
& trace
) {
120 ReportStack
*stack
= 0;
121 for (uptr si
= 0; si
< trace
.Size(); si
++) {
122 const uptr pc
= trace
.Get(si
);
124 // We obtain the return address, that is, address of the next instruction,
125 // so offset it by 1 byte.
126 const uptr pc1
= __sanitizer::StackTrace::GetPreviousInstructionPc(pc
);
128 // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
130 if (si
!= trace
.Size() - 1)
133 ReportStack
*ent
= SymbolizeCode(pc1
);
135 ReportStack
*last
= ent
;
137 last
->pc
= pc
; // restore original pc for report
140 last
->pc
= pc
; // restore original pc for report
144 StackStripMain(stack
);
148 ScopedReport::ScopedReport(ReportType typ
) {
149 ctx
->thread_registry
->CheckLocked();
150 void *mem
= internal_alloc(MBlockReport
, sizeof(ReportDesc
));
151 rep_
= new(mem
) ReportDesc
;
153 ctx
->report_mtx
.Lock();
154 CommonSanitizerReportMutex
.Lock();
157 ScopedReport::~ScopedReport() {
158 CommonSanitizerReportMutex
.Unlock();
159 ctx
->report_mtx
.Unlock();
160 DestroyAndFree(rep_
);
163 void ScopedReport::AddStack(const StackTrace
*stack
) {
164 ReportStack
**rs
= rep_
->stacks
.PushBack();
165 *rs
= SymbolizeStack(*stack
);
168 void ScopedReport::AddMemoryAccess(uptr addr
, Shadow s
,
169 const StackTrace
*stack
, const MutexSet
*mset
) {
170 void *mem
= internal_alloc(MBlockReportMop
, sizeof(ReportMop
));
171 ReportMop
*mop
= new(mem
) ReportMop
;
172 rep_
->mops
.PushBack(mop
);
174 mop
->addr
= addr
+ s
.addr0();
175 mop
->size
= s
.size();
176 mop
->write
= s
.IsWrite();
177 mop
->atomic
= s
.IsAtomic();
178 mop
->stack
= SymbolizeStack(*stack
);
179 for (uptr i
= 0; i
< mset
->Size(); i
++) {
180 MutexSet::Desc d
= mset
->Get(i
);
181 u64 mid
= this->AddMutex(d
.id
);
182 ReportMopMutex mtx
= {mid
, d
.write
};
183 mop
->mset
.PushBack(mtx
);
187 void ScopedReport::AddUniqueTid(int unique_tid
) {
188 rep_
->unique_tids
.PushBack(unique_tid
);
191 void ScopedReport::AddThread(const ThreadContext
*tctx
) {
192 for (uptr i
= 0; i
< rep_
->threads
.Size(); i
++) {
193 if ((u32
)rep_
->threads
[i
]->id
== tctx
->tid
)
196 void *mem
= internal_alloc(MBlockReportThread
, sizeof(ReportThread
));
197 ReportThread
*rt
= new(mem
) ReportThread();
198 rep_
->threads
.PushBack(rt
);
200 rt
->pid
= tctx
->os_id
;
201 rt
->running
= (tctx
->status
== ThreadStatusRunning
);
202 rt
->name
= internal_strdup(tctx
->name
);
203 rt
->parent_tid
= tctx
->parent_tid
;
205 rt
->stack
= SymbolizeStackId(tctx
->creation_stack_id
);
209 static ThreadContext
*FindThreadByUidLocked(int unique_id
) {
210 ctx
->thread_registry
->CheckLocked();
211 for (unsigned i
= 0; i
< kMaxTid
; i
++) {
212 ThreadContext
*tctx
= static_cast<ThreadContext
*>(
213 ctx
->thread_registry
->GetThreadLocked(i
));
214 if (tctx
&& tctx
->unique_id
== (u32
)unique_id
) {
221 static ThreadContext
*FindThreadByTidLocked(int tid
) {
222 ctx
->thread_registry
->CheckLocked();
223 return static_cast<ThreadContext
*>(
224 ctx
->thread_registry
->GetThreadLocked(tid
));
227 static bool IsInStackOrTls(ThreadContextBase
*tctx_base
, void *arg
) {
228 uptr addr
= (uptr
)arg
;
229 ThreadContext
*tctx
= static_cast<ThreadContext
*>(tctx_base
);
230 if (tctx
->status
!= ThreadStatusRunning
)
232 ThreadState
*thr
= tctx
->thr
;
234 return ((addr
>= thr
->stk_addr
&& addr
< thr
->stk_addr
+ thr
->stk_size
) ||
235 (addr
>= thr
->tls_addr
&& addr
< thr
->tls_addr
+ thr
->tls_size
));
238 ThreadContext
*IsThreadStackOrTls(uptr addr
, bool *is_stack
) {
239 ctx
->thread_registry
->CheckLocked();
240 ThreadContext
*tctx
= static_cast<ThreadContext
*>(
241 ctx
->thread_registry
->FindThreadContextLocked(IsInStackOrTls
,
245 ThreadState
*thr
= tctx
->thr
;
247 *is_stack
= (addr
>= thr
->stk_addr
&& addr
< thr
->stk_addr
+ thr
->stk_size
);
252 void ScopedReport::AddThread(int unique_tid
) {
254 AddThread(FindThreadByUidLocked(unique_tid
));
258 void ScopedReport::AddMutex(const SyncVar
*s
) {
259 for (uptr i
= 0; i
< rep_
->mutexes
.Size(); i
++) {
260 if (rep_
->mutexes
[i
]->id
== s
->uid
)
263 void *mem
= internal_alloc(MBlockReportMutex
, sizeof(ReportMutex
));
264 ReportMutex
*rm
= new(mem
) ReportMutex();
265 rep_
->mutexes
.PushBack(rm
);
268 rm
->destroyed
= false;
269 rm
->stack
= SymbolizeStackId(s
->creation_stack_id
);
272 u64
ScopedReport::AddMutex(u64 id
) {
275 uptr addr
= SyncVar::SplitId(id
, &uid
);
276 SyncVar
*s
= ctx
->synctab
.GetIfExistsAndLock(addr
, false);
277 // Check that the mutex is still alive.
278 // Another mutex can be created at the same address,
279 // so check uid as well.
280 if (s
&& s
->CheckId(uid
)) {
291 void ScopedReport::AddDeadMutex(u64 id
) {
292 for (uptr i
= 0; i
< rep_
->mutexes
.Size(); i
++) {
293 if (rep_
->mutexes
[i
]->id
== id
)
296 void *mem
= internal_alloc(MBlockReportMutex
, sizeof(ReportMutex
));
297 ReportMutex
*rm
= new(mem
) ReportMutex();
298 rep_
->mutexes
.PushBack(rm
);
301 rm
->destroyed
= true;
305 void ScopedReport::AddLocation(uptr addr
, uptr size
) {
312 if (FdLocation(addr
, &fd
, &creat_tid
, &creat_stack
)
313 || FdLocation(AlternativeAddress(addr
), &fd
, &creat_tid
, &creat_stack
)) {
314 void *mem
= internal_alloc(MBlockReportLoc
, sizeof(ReportLocation
));
315 ReportLocation
*loc
= new(mem
) ReportLocation();
316 rep_
->locs
.PushBack(loc
);
317 loc
->type
= ReportLocationFD
;
319 loc
->tid
= creat_tid
;
320 loc
->stack
= SymbolizeStackId(creat_stack
);
321 ThreadContext
*tctx
= FindThreadByUidLocked(creat_tid
);
327 if (allocator()->PointerIsMine((void*)addr
)
328 && (b
= user_mblock(0, (void*)addr
))) {
329 ThreadContext
*tctx
= FindThreadByTidLocked(b
->Tid());
330 void *mem
= internal_alloc(MBlockReportLoc
, sizeof(ReportLocation
));
331 ReportLocation
*loc
= new(mem
) ReportLocation();
332 rep_
->locs
.PushBack(loc
);
333 loc
->type
= ReportLocationHeap
;
334 loc
->addr
= (uptr
)allocator()->GetBlockBegin((void*)addr
);
335 loc
->size
= b
->Size();
336 loc
->tid
= tctx
? tctx
->tid
: b
->Tid();
341 loc
->stack
= SymbolizeStackId(b
->StackId());
346 bool is_stack
= false;
347 if (ThreadContext
*tctx
= IsThreadStackOrTls(addr
, &is_stack
)) {
348 void *mem
= internal_alloc(MBlockReportLoc
, sizeof(ReportLocation
));
349 ReportLocation
*loc
= new(mem
) ReportLocation();
350 rep_
->locs
.PushBack(loc
);
351 loc
->type
= is_stack
? ReportLocationStack
: ReportLocationTLS
;
352 loc
->tid
= tctx
->tid
;
355 ReportLocation
*loc
= SymbolizeData(addr
);
357 rep_
->locs
.PushBack(loc
);
364 void ScopedReport::AddSleep(u32 stack_id
) {
365 rep_
->sleep
= SymbolizeStackId(stack_id
);
369 void ScopedReport::SetCount(int count
) {
373 const ReportDesc
*ScopedReport::GetReport() const {
377 void RestoreStack(int tid
, const u64 epoch
, StackTrace
*stk
, MutexSet
*mset
) {
378 // This function restores stack trace and mutex set for the thread/epoch.
379 // It does so by getting stack trace and mutex set at the beginning of
380 // trace part, and then replaying the trace till the given epoch.
381 ctx
->thread_registry
->CheckLocked();
382 ThreadContext
*tctx
= static_cast<ThreadContext
*>(
383 ctx
->thread_registry
->GetThreadLocked(tid
));
386 if (tctx
->status
!= ThreadStatusRunning
387 && tctx
->status
!= ThreadStatusFinished
388 && tctx
->status
!= ThreadStatusDead
)
390 Trace
* trace
= ThreadTrace(tctx
->tid
);
392 const int partidx
= (epoch
/ kTracePartSize
) % TraceParts();
393 TraceHeader
* hdr
= &trace
->headers
[partidx
];
394 if (epoch
< hdr
->epoch0
)
396 const u64 epoch0
= RoundDown(epoch
, TraceSize());
397 const u64 eend
= epoch
% TraceSize();
398 const u64 ebegin
= RoundDown(eend
, kTracePartSize
);
399 DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
400 tid
, (uptr
)epoch
, (uptr
)ebegin
, (uptr
)eend
, partidx
);
401 InternalScopedBuffer
<uptr
> stack(kShadowStackSize
);
402 for (uptr i
= 0; i
< hdr
->stack0
.Size(); i
++) {
403 stack
[i
] = hdr
->stack0
.Get(i
);
404 DPrintf2(" #%02lu: pc=%zx\n", i
, stack
[i
]);
408 uptr pos
= hdr
->stack0
.Size();
409 Event
*events
= (Event
*)GetThreadTrace(tid
);
410 for (uptr i
= ebegin
; i
<= eend
; i
++) {
411 Event ev
= events
[i
];
412 EventType typ
= (EventType
)(ev
>> 61);
413 uptr pc
= (uptr
)(ev
& ((1ull << 61) - 1));
414 DPrintf2(" %zu typ=%d pc=%zx\n", i
, typ
, pc
);
415 if (typ
== EventTypeMop
) {
417 } else if (typ
== EventTypeFuncEnter
) {
419 } else if (typ
== EventTypeFuncExit
) {
424 if (typ
== EventTypeLock
) {
425 mset
->Add(pc
, true, epoch0
+ i
);
426 } else if (typ
== EventTypeUnlock
) {
428 } else if (typ
== EventTypeRLock
) {
429 mset
->Add(pc
, false, epoch0
+ i
);
430 } else if (typ
== EventTypeRUnlock
) {
431 mset
->Del(pc
, false);
434 for (uptr j
= 0; j
<= pos
; j
++)
435 DPrintf2(" #%zu: %zx\n", j
, stack
[j
]);
437 if (pos
== 0 && stack
[0] == 0)
440 stk
->Init(stack
.data(), pos
);
443 static bool HandleRacyStacks(ThreadState
*thr
, const StackTrace (&traces
)[2],
444 uptr addr_min
, uptr addr_max
) {
445 bool equal_stack
= false;
447 if (flags()->suppress_equal_stacks
) {
448 hash
.hash
[0] = md5_hash(traces
[0].Begin(), traces
[0].Size() * sizeof(uptr
));
449 hash
.hash
[1] = md5_hash(traces
[1].Begin(), traces
[1].Size() * sizeof(uptr
));
450 for (uptr i
= 0; i
< ctx
->racy_stacks
.Size(); i
++) {
451 if (hash
== ctx
->racy_stacks
[i
]) {
452 DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
458 bool equal_address
= false;
459 RacyAddress ra0
= {addr_min
, addr_max
};
460 if (flags()->suppress_equal_addresses
) {
461 for (uptr i
= 0; i
< ctx
->racy_addresses
.Size(); i
++) {
462 RacyAddress ra2
= ctx
->racy_addresses
[i
];
463 uptr maxbeg
= max(ra0
.addr_min
, ra2
.addr_min
);
464 uptr minend
= min(ra0
.addr_max
, ra2
.addr_max
);
465 if (maxbeg
< minend
) {
466 DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
467 equal_address
= true;
472 if (equal_stack
|| equal_address
) {
474 ctx
->racy_stacks
.PushBack(hash
);
476 ctx
->racy_addresses
.PushBack(ra0
);
482 static void AddRacyStacks(ThreadState
*thr
, const StackTrace (&traces
)[2],
483 uptr addr_min
, uptr addr_max
) {
484 if (flags()->suppress_equal_stacks
) {
486 hash
.hash
[0] = md5_hash(traces
[0].Begin(), traces
[0].Size() * sizeof(uptr
));
487 hash
.hash
[1] = md5_hash(traces
[1].Begin(), traces
[1].Size() * sizeof(uptr
));
488 ctx
->racy_stacks
.PushBack(hash
);
490 if (flags()->suppress_equal_addresses
) {
491 RacyAddress ra0
= {addr_min
, addr_max
};
492 ctx
->racy_addresses
.PushBack(ra0
);
496 bool OutputReport(Context
*ctx
,
497 const ScopedReport
&srep
,
498 const ReportStack
*suppress_stack1
,
499 const ReportStack
*suppress_stack2
,
500 const ReportLocation
*suppress_loc
) {
501 atomic_store(&ctx
->last_symbolize_time_ns
, NanoTime(), memory_order_relaxed
);
502 const ReportDesc
*rep
= srep
.GetReport();
503 Suppression
*supp
= 0;
504 uptr suppress_pc
= IsSuppressed(rep
->typ
, suppress_stack1
, &supp
);
505 if (suppress_pc
== 0)
506 suppress_pc
= IsSuppressed(rep
->typ
, suppress_stack2
, &supp
);
507 if (suppress_pc
== 0)
508 suppress_pc
= IsSuppressed(rep
->typ
, suppress_loc
, &supp
);
509 if (suppress_pc
!= 0) {
510 FiredSuppression s
= {srep
.GetReport()->typ
, suppress_pc
, supp
};
511 ctx
->fired_suppressions
.push_back(s
);
513 if (OnReport(rep
, suppress_pc
!= 0))
517 if (flags()->halt_on_error
)
518 internal__exit(flags()->exitcode
);
522 bool IsFiredSuppression(Context
*ctx
,
523 const ScopedReport
&srep
,
524 const StackTrace
&trace
) {
525 for (uptr k
= 0; k
< ctx
->fired_suppressions
.size(); k
++) {
526 if (ctx
->fired_suppressions
[k
].type
!= srep
.GetReport()->typ
)
528 for (uptr j
= 0; j
< trace
.Size(); j
++) {
529 FiredSuppression
*s
= &ctx
->fired_suppressions
[k
];
530 if (trace
.Get(j
) == s
->pc
) {
532 s
->supp
->hit_count
++;
540 static bool IsFiredSuppression(Context
*ctx
,
541 const ScopedReport
&srep
,
543 for (uptr k
= 0; k
< ctx
->fired_suppressions
.size(); k
++) {
544 if (ctx
->fired_suppressions
[k
].type
!= srep
.GetReport()->typ
)
546 FiredSuppression
*s
= &ctx
->fired_suppressions
[k
];
549 s
->supp
->hit_count
++;
556 bool FrameIsInternal(const ReportStack
*frame
) {
557 return frame
!= 0 && frame
->file
!= 0
558 && (internal_strstr(frame
->file
, "tsan_interceptors.cc") ||
559 internal_strstr(frame
->file
, "sanitizer_common_interceptors.inc") ||
560 internal_strstr(frame
->file
, "tsan_interface_"));
563 // On programs that use Java we see weird reports like:
564 // WARNING: ThreadSanitizer: data race (pid=22512)
565 // Read of size 8 at 0x7d2b00084318 by thread 100:
566 // #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
567 // #1 <null> <null>:0 (0x7f7ad9b40193)
568 // Previous write of size 8 at 0x7d2b00084318 by thread 105:
569 // #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
570 // #1 <null> <null>:0 (0x7f7ad9b42707)
571 static bool IsJavaNonsense(const ReportDesc
*rep
) {
573 for (uptr i
= 0; i
< rep
->mops
.Size(); i
++) {
574 ReportMop
*mop
= rep
->mops
[i
];
575 ReportStack
*frame
= mop
->stack
;
577 || (frame
->func
== 0 && frame
->file
== 0 && frame
->line
== 0
578 && frame
->module
== 0)) {
581 if (FrameIsInternal(frame
)) {
584 || (frame
->func
== 0 && frame
->file
== 0 && frame
->line
== 0
585 && frame
->module
== 0)) {
587 FiredSuppression supp
= {rep
->typ
, frame
->pc
, 0};
588 ctx
->fired_suppressions
.push_back(supp
);
598 static bool RaceBetweenAtomicAndFree(ThreadState
*thr
) {
599 Shadow
s0(thr
->racy_state
[0]);
600 Shadow
s1(thr
->racy_state
[1]);
601 CHECK(!(s0
.IsAtomic() && s1
.IsAtomic()));
602 if (!s0
.IsAtomic() && !s1
.IsAtomic())
604 if (s0
.IsAtomic() && s1
.IsFreed())
606 if (s1
.IsAtomic() && thr
->is_freeing
)
611 void ReportRace(ThreadState
*thr
) {
612 // Symbolizer makes lots of intercepted calls. If we try to process them,
613 // at best it will cause deadlocks on internal mutexes.
614 ScopedIgnoreInterceptors ignore
;
616 if (!flags()->report_bugs
)
618 if (!flags()->report_atomic_races
&& !RaceBetweenAtomicAndFree(thr
))
623 Shadow
s(thr
->racy_state
[1]);
624 freed
= s
.GetFreedAndReset();
625 thr
->racy_state
[1] = s
.raw();
628 uptr addr
= ShadowToMem((uptr
)thr
->racy_shadow_addr
);
632 uptr a0
= addr
+ Shadow(thr
->racy_state
[0]).addr0();
633 uptr a1
= addr
+ Shadow(thr
->racy_state
[1]).addr0();
634 uptr e0
= a0
+ Shadow(thr
->racy_state
[0]).size();
635 uptr e1
= a1
+ Shadow(thr
->racy_state
[1]).size();
636 addr_min
= min(a0
, a1
);
637 addr_max
= max(e0
, e1
);
638 if (IsExpectedReport(addr_min
, addr_max
- addr_min
))
642 ThreadRegistryLock
l0(ctx
->thread_registry
);
644 ReportType typ
= ReportTypeRace
;
645 if (thr
->is_vptr_access
)
646 typ
= ReportTypeVptrRace
;
648 typ
= ReportTypeUseAfterFree
;
649 ScopedReport
rep(typ
);
650 if (IsFiredSuppression(ctx
, rep
, addr
))
653 StackTrace traces
[kMop
];
654 const uptr toppc
= TraceTopPC(thr
);
655 traces
[0].ObtainCurrent(thr
, toppc
);
656 if (IsFiredSuppression(ctx
, rep
, traces
[0]))
658 InternalScopedBuffer
<MutexSet
> mset2(1);
659 new(mset2
.data()) MutexSet();
660 Shadow
s2(thr
->racy_state
[1]);
661 RestoreStack(s2
.tid(), s2
.epoch(), &traces
[1], mset2
.data());
662 if (IsFiredSuppression(ctx
, rep
, traces
[1]))
665 if (HandleRacyStacks(thr
, traces
, addr_min
, addr_max
))
668 for (uptr i
= 0; i
< kMop
; i
++) {
669 Shadow
s(thr
->racy_state
[i
]);
670 rep
.AddMemoryAccess(addr
, s
, &traces
[i
],
671 i
== 0 ? &thr
->mset
: mset2
.data());
674 if (flags()->suppress_java
&& IsJavaNonsense(rep
.GetReport()))
677 for (uptr i
= 0; i
< kMop
; i
++) {
678 FastState
s(thr
->racy_state
[i
]);
679 ThreadContext
*tctx
= static_cast<ThreadContext
*>(
680 ctx
->thread_registry
->GetThreadLocked(s
.tid()));
681 if (s
.epoch() < tctx
->epoch0
|| s
.epoch() > tctx
->epoch1
)
686 rep
.AddLocation(addr_min
, addr_max
- addr_min
);
690 Shadow
s(thr
->racy_state
[1]);
691 if (s
.epoch() <= thr
->last_sleep_clock
.get(s
.tid()))
692 rep
.AddSleep(thr
->last_sleep_stack_id
);
696 ReportLocation
*suppress_loc
= rep
.GetReport()->locs
.Size() ?
697 rep
.GetReport()->locs
[0] : 0;
698 if (!OutputReport(ctx
, rep
, rep
.GetReport()->mops
[0]->stack
,
699 rep
.GetReport()->mops
[1]->stack
,
703 AddRacyStacks(thr
, traces
, addr_min
, addr_max
);
706 void PrintCurrentStack(ThreadState
*thr
, uptr pc
) {
708 trace
.ObtainCurrent(thr
, pc
);
709 PrintStack(SymbolizeStack(trace
));
712 void PrintCurrentStackSlow() {
714 __sanitizer::StackTrace
*ptrace
= new(internal_alloc(MBlockStackTrace
,
715 sizeof(__sanitizer::StackTrace
))) __sanitizer::StackTrace
;
716 ptrace
->Unwind(kStackTraceMax
, __sanitizer::StackTrace::GetCurrentPc(), 0, 0,
718 for (uptr i
= 0; i
< ptrace
->size
/ 2; i
++) {
719 uptr tmp
= ptrace
->trace
[i
];
720 ptrace
->trace
[i
] = ptrace
->trace
[ptrace
->size
- i
- 1];
721 ptrace
->trace
[ptrace
->size
- i
- 1] = tmp
;
724 trace
.Init(ptrace
->trace
, ptrace
->size
);
725 PrintStack(SymbolizeStack(trace
));
729 } // namespace __tsan