//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}
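
// Initialize() below spins on __tsan_resumed when flags()->stop_on_start is
// set; the busy-wait is released by calling __tsan_resume(), e.g. from a
// debugger: (gdb) call __tsan_resume()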

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
bool CPP_WEAK OnFinalize(bool failed) {
  return failed;
}
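
// OnFinalize() is declared CPP_WEAK, so an embedding tool can supply a
// strong definition to adjust the process exit verdict. A minimal sketch
// (FrontEndSawError() is a hypothetical helper, not part of TSan):
//   namespace __tsan {
//   bool OnFinalize(bool failed) {
//     return failed || FrontEndSawError();
//   }
//   }  // namespace __tsan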

static Context *ctx;
Context *CTX() {
  return ctx;
}

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_mtx(MutexTypeThreads, StatMtxThreads)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

ThreadContext::ThreadContext(int tid)
  : tid(tid)
  , unique_id()
  , os_id()
  , user_id()
  , thr()
  , status(ThreadStatusInvalid)
  , detached()
  , reuse_count()
  , epoch0()
  , epoch1()
  , dead_info()
  , dead_next()
  , name() {
}

static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
  uptr shadow = GetShadowMemoryConsumption();

  int nthread = 0;
  int nlivethread = 0;
  uptr threadmem = 0;
  {
    Lock l(&ctx->thread_mtx);
    for (unsigned i = 0; i < kMaxTid; i++) {
      ThreadContext *tctx = ctx->threads[i];
      if (tctx == 0)
        continue;
      nthread += 1;
      threadmem += sizeof(ThreadContext);
      if (tctx->status != ThreadStatusRunning)
        continue;
      nlivethread += 1;
      threadmem += sizeof(ThreadState);
    }
  }

  uptr nsync = 0;
  uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);

  internal_snprintf(buf, buf_size, "%d: shadow=%zuMB"
                                   " thread=%zuMB(total=%d/live=%d)"
                                   " sync=%zuMB(cnt=%zu)\n",
      num,
      shadow >> 20,
      threadmem >> 20, nthread, nlivethread,
      syncmem >> 20, nsync);
}
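
// WriteMemoryProfile() produces one line per invocation; with the format
// string above, an illustrative (made-up numbers) line is:
//   3: shadow=2048MB thread=1MB(total=17/live=9) sync=5MB(cnt=40000)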

static void MemoryProfileThread(void *arg) {
  ScopedInRtl in_rtl;
  fd_t fd = (fd_t)(uptr)arg;
  for (int i = 0; ; i++) {
    InternalScopedBuffer<char> buf(4096);
    WriteMemoryProfile(buf.data(), buf.size(), i);
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    SleepForSeconds(1);
  }
}

static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuffer<char> filename(4096);
  internal_snprintf(filename.data(), filename.size(), "%s.%d",
      flags()->profile_memory, GetPid());
  fd_t fd = OpenFile(filename.data(), true);
  if (fd == kInvalidFd) {
    Printf("Failed to open memory profile file '%s'\n", &filename[0]);
    Die();
  }
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}
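
// The profiler is enabled with the profile_memory flag, typically set via
// the environment, e.g. TSAN_OPTIONS=profile_memory=tsan.prof (the flags are
// parsed from the environment string by InitializeFlags() during startup);
// the pid is appended to the file name by the internal_snprintf() above.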

static void MemoryFlushThread(void *arg) {
  ScopedInRtl in_rtl;
  for (int i = 0; ; i++) {
    SleepForMillis(flags()->flush_memory_ms);
    FlushShadowMemory();
  }
}

static void InitializeMemoryFlush() {
  if (flags()->flush_memory_ms == 0)
    return;
  if (flags()->flush_memory_ms < 100)
    flags()->flush_memory_ms = 100;
  internal_start_thread(&MemoryFlushThread, 0);
}

void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}
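
// Sizing sketch (the authoritative address transformation is MemToShadow()
// in tsan_platform.h): every kShadowCell bytes of application memory are
// described by kShadowCnt shadow words, so `size` application bytes need
// size * kShadowMultiplier bytes of shadow, which is what gets mapped here.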

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  ctx->dead_list_size = 0;
  ctx->dead_list_head = 0;
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  // Set up the correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!InitializeExternalSymbolizer(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  InitializeMemoryProfile();
  InitializeMemoryFlush();

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           GetPid());

  // Initialize thread 0.
  ctx->thread_seq = 0;
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetPid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           GetPid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
           ctx->nmissed_expected);
  }

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif
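
// CurrentStackId() transiently pushes `pc` so the saved stack includes the
// current instruction, then pops it again; StackDepotPut() deduplicates the
// frame array and returns a compact u32 id that reports can later resolve
// back into a full stack.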

void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}
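
// The per-thread trace is a ring of TraceParts() parts holding kTracePartSize
// events each. On crossing into a new part, the header above snapshots the
// starting epoch, current stack and mutex set; race reports use these
// snapshots to restore the state at an arbitrary event within the part.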

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
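
// Worked example, assuming kTracePartSizeBits == 13 (the actual constant
// lives in the trace headers): with history_size == 2 the trace holds
// 1 << (13 + 2 + 1) == 64K events, i.e. TraceParts() == 64K / 8K == 8 parts.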

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
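
// StoreIfNotYetStored() also serves as a marker: it zeroes *s (the local
// copy of the current shadow word) after the first store, and
// MemoryAccessImpl() below checks `store_word == 0` to see whether the
// access has already been written into some slot.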

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
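
// HappensBefore() is the vector-clock query: thr->clock.get(tid) is the
// latest epoch of thread `tid` that the current thread has synchronized
// with. For example, if thr->clock.get(3) == 100, then every access that
// thread 3 made at epoch <= 100 is ordered before the current access and
// cannot race with it.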

ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well, which allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).
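
  // The repeated #include below textually unrolls the scan over the
  // kShadowCnt shadow slots: tsan_update_shadow_word_inl.h inspects slot
  // `idx` and either matches, replaces, records a candidate, or jumps to
  // RACE. Since kShadowCnt is a compile-time constant, the untaken branches
  // of the if/else chain are eliminated by the compiler.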
  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a (pseudo-)random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
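
// Each access thus increments the thread's epoch and packs the whole event,
// {tid, epoch, offset within the cell (addr & 7), size log, is_write,
// is_atomic}, into a single 8-byte shadow word, so the race check in
// MemoryAccessImpl() is only a handful of integer comparisons per slot.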

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  (void)thr;
  (void)pc;
  // Some programs mmap hundreds of GBs but actually use only a small part.
  // So, it's better to report a false positive on the memory
  // than to hang here senselessly.
  const uptr kMaxResetSize = 4ull*1024*1024*1024;
  if (size > kMaxResetSize)
    size = kMaxResetSize;
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  u64 *p = (u64*)MemToShadow(addr);
  CHECK(IsShadowMem((uptr)p));
  CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
  // FIXME: may overwrite a part outside the region
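  // Stamp `val` into the first shadow slot of every cell and zero the
  // remaining kShadowCnt-1 slots, leaving each cell with exactly one
  // synthetic access.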
  for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) {
    p[i++] = val;
    for (uptr j = 1; j < kShadowCnt; j++)
      p[i++] = 0;
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
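
// The range is first touched as an ordinary write (MemoryAccessRange() with
// is_write=true) so that a race between the free and a concurrent access is
// caught as such; afterwards every cell is stamped with a shadow value
// carrying the freed bit, so any later access races with the free itself.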

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
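  // In Go mode the shadow stack has no fixed capacity: goroutine call depth
  // is unbounded, so when the buffer fills up it is reallocated at twice
  // the size and the old contents are copied over.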
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}
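
// Begin/end requests nest via the counter; the resulting ignore bit lives in
// fast_state, which is why MemoryAccess() above can skip ignored accesses
// with a single bit test before touching any shadow memory.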

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif
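
// These empty functions encode the build configuration in their names.
// Presumably each object linked against the runtime references the matching
// symbol, so that mixing objects compiled with incompatible TSAN_* settings
// fails at link time instead of misbehaving at run time.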

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif