//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//
13 #include "sanitizer_common/sanitizer_atomic.h"
14 #include "sanitizer_common/sanitizer_common.h"
15 #include "sanitizer_common/sanitizer_libc.h"
16 #include "sanitizer_common/sanitizer_stackdepot.h"
17 #include "sanitizer_common/sanitizer_placement_new.h"
18 #include "sanitizer_common/sanitizer_symbolizer.h"
19 #include "tsan_defs.h"
20 #include "tsan_platform.h"
22 #include "tsan_mman.h"
23 #include "tsan_suppressions.h"
24 #include "tsan_symbolize.h"
volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;
// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnFinalize(bool failed) {
  return failed;
}
SANITIZER_INTERFACE_ATTRIBUTE
void WEAK OnInitialize() {}
#endif
static char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}
#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif
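// Note: dead threads go through the registry's quarantine before their ids
// are recycled, so a race report that blames a recently finished thread can
// presumably still resolve its context.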
Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , ignore_interceptors()
  , clock(tid, reuse_count)
#ifndef TSAN_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size)
#ifndef TSAN_GO
  , last_sleep_clock(tid)
#endif
{
}
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
      i, n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
  WriteMemoryProfile(buf.data(), buf.size());
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}
static void BackgroundThread(void *arg) {
#ifndef TSAN_GO
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread()->ignore_interceptors++;
#endif
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    InternalScopedBuffer<char> filename(4096);
    internal_snprintf(filename.data(), filename.size(), "%s.%d",
        flags()->profile_memory, (int)internal_getpid());
    uptr openrv = OpenFile(filename.data(), true);
    if (internal_iserror(openrv)) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
          filename.data());
    } else {
      mprof_fd = openrv;
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      if (flags()->verbosity > 0) {
        Printf("ThreadSanitizer: memory flush check"
               " RSS=%llu LAST=%llu LIMIT=%llu\n",
               (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      }
      if (2 * rss > limit + last_rss) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss >> 20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef TSAN_GO
    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}
static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}
void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}
void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
  if (addr1 != addr) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
        addr, size, addr1);
    Die();
  }
}
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  InitializeLibIgnore();
  Symbolizer::Init(common_flags()->external_symbolizer_path);
  Symbolizer::Get()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif
  StartBackgroundThread();
  SetSandboxingCallback(StopBackgroundThread);
  if (flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}
int Finalize(ThreadState *thr) {
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef TSAN_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}
#ifndef TSAN_GO
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    internal_start_thread(&BackgroundThread, 0);
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}
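// Note: the current pc is pushed temporarily so that the depot entry includes
// the frame performing the operation; popping it afterwards leaves the shadow
// stack exactly as it was.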
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}
Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}
uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}
uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
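// Note: TraceSize is 2^(kTracePartSizeBits + history_size + 1) events, i.e.
// TraceParts yields 2^(history_size + 1) parts of kTracePartSize events each;
// every increment of the history_size flag doubles the per-thread history.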
420 extern "C" void __tsan_trace_switch() {
421 TraceSwitch(cur_thread());
424 extern "C" void __tsan_report_race() {
425 ReportRace(cur_thread());
ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
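// Note: shadow words are accessed with relaxed atomics. Concurrent updates of
// the same shadow cell can lose or observe stale values, but by design the
// worst outcome is presumably a missed race, not a false report.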
static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}
static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}
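// The old access happens-before the current one iff the current thread's
// vector clock entry for the old access's thread has already reached the old
// access's epoch.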
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed to replace some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}
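// Note on the replacement policy above: cur.epoch() % kShadowCnt acts as a
// cheap pseudo-random victim index, so candidate slots are evicted roughly
// uniformly over time without a real RNG on the hot path.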
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}
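// Note: an unaligned or odd-sized access is decomposed into subaccesses that
// stay within a single 8-byte shadow cell ((addr & ~7) picks the cell), since
// one shadow descriptor can only describe bytes inside one cell.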
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    // We must not store to the trace if we do not store to the shadow.
    // That is, this call must be moved somewhere below.
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}
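// Note: for large ranges the unmap/remap pair swaps in fresh zero pages from
// the kernel, which is much cheaper than storing over megabytes of shadow;
// only the head and tail are written explicitly. The remapped middle becomes
// zero, which in practice is fine because callers passing a nonzero val (e.g.
// MemoryRangeFreed) cap their range size and take the store loop instead.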
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
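// Note: stamping the range with a freed-marked write shadow value makes any
// subsequent access to the block conflict with the free, so it can be
// reported as a use-after-free race.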
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}
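// Note: in Go mode the shadow stack grows on demand (goroutine stacks are
// effectively unbounded), while in C/C++ mode it has a fixed size and is
// guarded by the DCHECKs above.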
ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}
void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#ifndef TSAN_GO
  if (!ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#ifndef TSAN_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#ifndef TSAN_GO
  if (!ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  thr->ignore_sync--;
  CHECK_GE(thr->ignore_sync, 0);
#ifndef TSAN_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}
bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif
}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif