//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnFinalize(bool failed) {
  return failed;
}
#endif

static Context *ctx;
Context *CTX() {
  return ctx;
}

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , in_rtl()
#ifndef TSAN_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
      i, n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
  WriteMemoryProfile(buf.data(), buf.size());
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}

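// The background thread runs for the lifetime of the process. Driven by
// runtime flags, it periodically flushes shadow memory, enforces the soft
// RSS limit, appends to the memory profile and drops the symbolizer cache.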
static void BackgroundThread(void *arg) {
  ScopedInRtl in_rtl;
  Context *ctx = CTX();
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    InternalScopedBuffer<char> filename(4096);
    internal_snprintf(filename.data(), filename.size(), "%s.%d",
        flags()->profile_memory, (int)internal_getpid());
    uptr openrv = OpenFile(filename.data(), true);
    if (internal_iserror(openrv)) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
          &filename[0]);
    } else {
      mprof_fd = openrv;
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0; ; i++) {
    SleepForSeconds(1);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      if (flags()->verbosity > 0) {
        Printf("ThreadSanitizer: memory flush check"
               " RSS=%llu LAST=%llu LIMIT=%llu\n",
               (u64)rss>>20, (u64)last_rss>>20, (u64)limit>>20);
      }
      if (2 * rss > limit + last_rss) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef TSAN_GO
    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}

void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
  if (addr1 != addr) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
        addr, size, addr1);
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  InitializeLibIgnore();
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  bool external_symbolizer_started =
      Symbolizer::Init(external_symbolizer)->IsExternalAvailable();
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0' &&
      !external_symbolizer_started) {
    Printf("Failed to start external symbolizer: '%s'\n",
           external_symbolizer);
    Die();
  }
  Symbolizer::Get()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif
  internal_start_thread(&BackgroundThread, 0);

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef TSAN_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

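// Captures the thread's current shadow (call) stack in the stack depot and
// returns a compact id that reports can reference later. A non-zero pc is
// pushed temporarily so the id includes the exact call site.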
#ifndef TSAN_GO
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

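// The per-thread event trace is a ring of TraceParts() parts. When the epoch
// crosses into a new part, the part's header is sealed with the starting
// epoch, the current call stack and the current mutex set, so that events in
// the part can be decoded during report generation.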
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

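// Shadow cells are accessed with relaxed atomics: races on the shadow itself
// are tolerated (the worst case is a missed or duplicated report), and
// stronger ordering would be too expensive on the memory-access hot path.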
ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

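// The old access happens-before the current one iff the current thread has
// already synchronized with it: the thread's vector clock entry for the old
// access's thread must be at least the epoch stored in the old shadow value.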
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

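// Updates the kShadowCnt shadow slots of one cell for a single access.
// Each existing slot is classified as the same access (nothing to do),
// an older access by the same thread (replace), a non-overlapping or
// synchronized access (candidate for eviction), or a conflicting access
// with no happens-before edge (race).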
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed to replace some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

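// An unaligned or odd-sized access cannot be described by one shadow value,
// so it is split into a sequence of aligned 1/2/4/8-byte accesses, none of
// which crosses an 8-byte shadow cell boundary.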
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

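// The instrumentation hot path, called for every instrumented memory access:
// computes the shadow address, increments the thread's epoch, appends the
// access to the trace and updates the shadow cell.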
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

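// Writes 'val' into the first shadow slot of every cell in [addr, addr+size)
// and zeroes the remaining slots. Small ranges are written in place; for big
// ranges the middle part is remapped so the kernel provides zeroed pages
// lazily instead of the runtime dirtying them all.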
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < 64*1024) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

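// FuncEntry/FuncExit maintain the shadow call stack: an epoch-stamped trace
// event plus a pushed/popped pc. Report generation replays this to
// reconstruct the stack of a past access.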
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

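// Ignores are counted so that nested Begin/End pairs compose; while the
// count is non-zero the ignore bit in fast_state short-circuits
// MemoryAccess. The stack id of each Begin site is recorded so unbalanced
// ignores can be diagnosed.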
void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#ifndef TSAN_GO
  thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#ifndef TSAN_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#ifndef TSAN_GO
  thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  thr->ignore_sync--;
  CHECK_GE(thr->ignore_sync, 0);
#ifndef TSAN_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif