//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
bool CPP_WEAK OnFinalize(bool failed) {
  return failed;
}

static Context *ctx;
Context *CTX() {
  return ctx;
}

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
#ifndef TSAN_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
      i, n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
  WriteMemoryProfile(buf.data(), buf.size());
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}

static void BackgroundThread(void *arg) {
  ScopedInRtl in_rtl;
  Context *ctx = CTX();
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    InternalScopedBuffer<char> filename(4096);
    internal_snprintf(filename.data(), filename.size(), "%s.%d",
        flags()->profile_memory, (int)internal_getpid());
    uptr openrv = OpenFile(filename.data(), true);
    if (internal_iserror(openrv)) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
          &filename[0]);
    } else {
      mprof_fd = openrv;
    }
  }

  u64 last_flush = NanoTime();
  for (int i = 0; ; i++) {
    SleepForSeconds(1);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef TSAN_GO
    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}

void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Setup correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!getSymbolizer()->InitializeExternal(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  internal_start_thread(&BackgroundThread, 0);

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef TSAN_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
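// Captures the current shadow call stack into the stack depot and returns its
// id; a non-zero pc is temporarily pushed on top of the stack before the
// capture and popped afterwards.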
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

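// Starts a new trace part for the thread: records the part's starting epoch,
// current call stack and mutex set in the part header, so that reports can
// later restore the state at any event within the part.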
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed to replace some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

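// Breaks an unaligned access into smaller pieces that each stay inside one
// 8-byte shadow cell, and reports each piece as a separate access via
// MemoryAccess().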
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

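// Handles a single aligned access of 1, 2, 4 or 8 bytes: bumps the thread's
// epoch, appends the event to the thread trace and updates the shadow cell
// via MemoryAccessImpl(); accesses to .rodata are counted but never race.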
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

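// Writes the raw shadow value 'val' into the first slot of every shadow cell
// covering [addr, addr+size) and clears the remaining slots; used below to
// reset ranges, imitate writes and mark heap blocks as freed.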
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < 64*1024) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

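// Marks a freed heap range in shadow memory: first reports the free as a
// write over (at most 1k of) the range so that races with concurrent accesses
// are detected, then stamps the shadow with a 'freed' value so that later
// accesses to the block conflict with it.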
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0)
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif