//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
// Can be overridden by a front-end.
bool CPP_WEAK OnFinalize(bool failed) {
  return failed;
}
static char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}
#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif
Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  , shadow_stack_pos(&shadow_stack[0])
  , jmp_bufs(MBlockJmpBuf)
  , unique_id(unique_id)
  , tls_size(tls_size) {
}
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
      i, n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
  WriteMemoryProfile(buf.data(), buf.size());
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}
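
// Background thread: periodically flushes shadow memory if flush_memory_ms is
// set, appends to the memory profile if profile_memory is set, and flushes the
// symbolizer cache if flush_symbolizer_ms is set.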
static void BackgroundThread(void *arg) {
  Context *ctx = CTX();
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    InternalScopedBuffer<char> filename(4096);
    internal_snprintf(filename.data(), filename.size(), "%s.%d",
        flags()->profile_memory, (int)internal_getpid());
    uptr openrv = OpenFile(filename.data(), true);
    if (internal_iserror(openrv)) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
          filename.data());
    } else {
      mprof_fd = openrv;
    }
  }

  u64 last_flush = NanoTime();
  for (int i = 0; ; i++) {
    SleepForSeconds(1);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
}
void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}
void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}
void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}
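
// One-time runtime initialization: guarded by a static flag, it sets up the
// allocator, interceptors, platform, flags, suppressions and symbolizer,
// starts the background thread, creates and starts thread 0, and optionally
// suspends the process until __tsan_resume() is called.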
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  InitializeAllocator();
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
  InitializeShadowMemory();
  InitializeFlags(&ctx->flags, env);
  // Set up the correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!getSymbolizer()->InitializeExternal(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
  internal_start_thread(&BackgroundThread, 0);

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }
}
int Finalize(ThreadState *thr) {
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

  if (ctx->flags.verbosity)
    AllocatorPrintStats();

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (flags()->print_benign)
    PrintMatchedBenignRaces();

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}
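
// Returns a StackDepot id for the current shadow call stack extended with
// `pc`; the pc is pushed only for the duration of the StackDepotPut call.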
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  thr->shadow_stack_pos--;
  return id;
}
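
// Called when the thread's trace position moves into a new trace part:
// the part header records the starting epoch, current stack and mutex set
// so that events in the part can be restored later for reports.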
void TraceSwitch(ThreadState *thr) {
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
}
Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}
uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
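
// Shadow cells are accessed with relaxed atomics: concurrent reads and writes
// of shadow memory are expected, the analysis only requires that each 64-bit
// shadow word is loaded and stored indivisibly.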
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
  HACKY_CALL(__tsan_report_race);
}
static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well; it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
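  // tsan_update_shadow_word_inl.h is included once per shadow slot below to
  // manually unroll the scan: each inclusion examines slot `idx`, updates it
  // in place (clearing store_word once the current access has been stored),
  // and jumps to the RACE label when it finds a conflicting access.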
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and had already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
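
// Writes the given shadow value over an address range. Small ranges are
// filled in place; for large C/C++ ranges only the page-unaligned head and
// tail of the shadow are written directly, and the page-aligned middle is
// unmapped and remapped so the kernel supplies zero pages instead of
// dirtying shadow memory.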
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
    u64 val) {
  if (size == 0)
    return;
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < 64*1024) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}
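
// Marks a freed memory range: the range is first re-checked as a write access
// (to flag races between the free and concurrent accesses), then its shadow
// is overwritten with a "freed" marker so that later accesses are reported as
// use-after-free.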
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
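
// FuncEntry/FuncExit maintain the per-thread shadow call stack and append
// EventTypeFuncEnter/EventTypeFuncExit events to the trace; report stacks
// are reconstructed from this information.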
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}
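
// ThreadIgnoreBegin/End implement nested "ignore" regions: memory accesses
// are skipped while the counter is non-zero, tracked via the ignore bit in
// fast_state.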
void ThreadIgnoreBegin(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
}
void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0)
    thr->fast_state.ClearIgnoreBit();
}
bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif
}  // namespace __tsan

// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"