//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
bool CPP_WEAK OnFinalize(bool failed) {
  return failed;
}

static Context *ctx;
Context *CTX() {
  return ctx;
}

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , thread_mtx(MutexTypeThreads, StatMtxThreads)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

ThreadContext::ThreadContext(int tid)
  : tid(tid)
  , status(ThreadStatusInvalid) {
}

static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
  uptr shadow = GetShadowMemoryConsumption();

  int nthread = 0;
  int nlivethread = 0;
  uptr threadmem = 0;
  {
    Lock l(&ctx->thread_mtx);
    for (unsigned i = 0; i < kMaxTid; i++) {
      ThreadContext *tctx = ctx->threads[i];
      if (tctx == 0)
        continue;
      nthread += 1;
      threadmem += sizeof(ThreadContext);
      if (tctx->status != ThreadStatusRunning)
        continue;
      nlivethread += 1;
      threadmem += sizeof(ThreadState);
    }
  }

  uptr nsync = 0;
  uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);

  internal_snprintf(buf, buf_size, "%d: shadow=%zuMB"
                                   " thread=%zuMB(total=%d/live=%d)"
                                   " sync=%zuMB(cnt=%zu)\n",
      num,
      shadow >> 20,
      threadmem >> 20, nthread, nlivethread,
      syncmem >> 20, nsync);
}

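// Background thread: periodically appends a one-line memory usage summary
// to the profile file descriptor passed in 'arg'.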
static void MemoryProfileThread(void *arg) {
  fd_t fd = (fd_t)(uptr)arg;
  for (int i = 0; ; i++) {
    InternalScopedBuffer<char> buf(4096);
    WriteMemoryProfile(buf.data(), buf.size(), i);
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    SleepForSeconds(1);
  }
}

static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuffer<char> filename(4096);
  internal_snprintf(filename.data(), filename.size(), "%s.%d",
      flags()->profile_memory, GetPid());
  fd_t fd = OpenFile(filename.data(), true);
  if (fd == kInvalidFd) {
    Printf("Failed to open memory profile file '%s'\n", &filename[0]);
    Die();
  }
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}

static void MemoryFlushThread(void *arg) {
  for (int i = 0; ; i++) {
    SleepForMillis(flags()->flush_memory_ms);
    FlushShadowMemory();
  }
}

static void InitializeMemoryFlush() {
  if (flags()->flush_memory_ms == 0)
    return;
  if (flags()->flush_memory_ms < 100)
    flags()->flush_memory_ms = 100;
  internal_start_thread(&MemoryFlushThread, 0);
}

void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  InitializeAllocator();
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
  InitializeShadowMemory();
  ctx->dead_list_size = 0;
  ctx->dead_list_head = 0;
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  // Set up the correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();

  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!InitializeExternalSymbolizer(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
  InitializeMemoryProfile();
  InitializeMemoryFlush();

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           GetPid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetPid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           GetPid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  ctx->report_mtx.Unlock();

  if (ctx->flags.verbosity)
    AllocatorPrintStats();

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

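// Captures the current call stack (the shadow stack plus the given pc) into
// the stack depot and returns its id.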
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  thr->shadow_stack_pos--;
  return id;
}

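// Starts a new trace part: records the starting epoch, the current call stack
// and the current mutex set in the part header, so that events in this part
// can be restored later during race reporting.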
void TraceSwitch(ThreadState *thr) {
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

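// Entry points reached from instrumented code; HandleRace below invokes
// __tsan_report_race through HACKY_CALL.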
334 extern "C" void __tsan_trace_switch() {
335 TraceSwitch(cur_thread());
338 extern "C" void __tsan_report_race() {
339 ReportRace(cur_thread());
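// Shadow words are read and written with relaxed 64-bit atomics.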
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
  HACKY_CALL(__tsan_report_race);
}

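// The old access happens-before the current one if the current thread's
// vector clock entry for the old access's thread has reached the old
// access's epoch.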
static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

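// Updates the kShadowCnt shadow slots of the accessed cell with the current
// access and reports a race (via HandleRace) if a conflicting old access is
// found.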
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well; it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and had already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

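// Entry point for every instrumented memory access: computes the shadow
// address, builds the current shadow value, traces the event and delegates
// to MemoryAccessImpl.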
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

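// Fills the shadow of the range [addr, addr+size) with 'val'
// (the first slot of each shadow cell gets 'val', the rest are zeroed).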
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  if (size == 0)
    return;
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Some programs mmap hundreds of GBs but actually use only a small part.
  // So, it's better to report a false positive on the memory
  // than to hang here senselessly.
  const uptr kMaxResetSize = 4ull*1024*1024*1024;
  if (size > kMaxResetSize)
    size = kMaxResetSize;
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  u64 *p = (u64*)MemToShadow(addr);
  CHECK(IsShadowMem((uptr)p));
  CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
  // FIXME: may overwrite a part outside the region
  for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) {
    p[i++] = val;
    for (uptr j = 1; j < kShadowCnt; j++)
      p[i++] = 0;
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

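// Marks a heap range as freed: first reports races with any concurrent
// accesses (MemoryAccessRange with is_freeing set), then rewrites the
// range's shadow with a synthetic write shadow value.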
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

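// Called on function entry: advances the epoch, traces the event and pushes
// pc onto the shadow stack (growing the stack in the dynamically-sized case).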
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

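// Called on function exit: advances the epoch, traces the event and pops
// the shadow stack.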
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

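// Adjusts the nesting counter of ignore regions and mirrors it into the
// per-thread ignore bit that MemoryAccess checks on the fast path.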
void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"