//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

static Context *ctx;
Context *CTX() {
  return ctx;
}

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_mtx(MutexTypeThreads, StatMtxThreads)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

ThreadContext::ThreadContext(int tid)
  : tid(tid)
  , unique_id()
  , os_id()
  , user_id()
  , thr()
  , status(ThreadStatusInvalid)
  , detached()
  , reuse_count()
  , epoch0()
  , epoch1()
  , dead_info()
  , dead_next()
  , name() {
}

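// Formats one memory-profile line (shadow, per-thread and sync-object memory
// consumption). MemoryProfileThread below writes such a line to the profile
// file once per second.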
static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
  uptr shadow = GetShadowMemoryConsumption();

  int nthread = 0;
  int nlivethread = 0;
  uptr threadmem = 0;
  {
    Lock l(&ctx->thread_mtx);
    for (unsigned i = 0; i < kMaxTid; i++) {
      ThreadContext *tctx = ctx->threads[i];
      if (tctx == 0)
        continue;
      nthread += 1;
      threadmem += sizeof(ThreadContext);
      if (tctx->status != ThreadStatusRunning)
        continue;
      nlivethread += 1;
      threadmem += sizeof(ThreadState);
    }
  }

  uptr nsync = 0;
  uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);

  internal_snprintf(buf, buf_size, "%d: shadow=%zuMB"
                    " thread=%zuMB(total=%d/live=%d)"
                    " sync=%zuMB(cnt=%zu)\n",
                    num,
                    shadow >> 20,
                    threadmem >> 20, nthread, nlivethread,
                    syncmem >> 20, nsync);
}

static void MemoryProfileThread(void *arg) {
  ScopedInRtl in_rtl;
  fd_t fd = (fd_t)(uptr)arg;
  for (int i = 0; ; i++) {
    InternalScopedBuffer<char> buf(4096);
    WriteMemoryProfile(buf.data(), buf.size(), i);
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    SleepForSeconds(1);
  }
}

static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuffer<char> filename(4096);
  internal_snprintf(filename.data(), filename.size(), "%s.%d",
      flags()->profile_memory, GetPid());
  fd_t fd = internal_open(filename.data(), true);
  if (fd == kInvalidFd) {
    Printf("Failed to open memory profile file '%s'\n", &filename[0]);
    Die();
  }
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}

static void MemoryFlushThread(void *arg) {
  ScopedInRtl in_rtl;
  for (int i = 0; ; i++) {
    SleepForMillis(flags()->flush_memory_ms);
    FlushShadowMemory();
  }
}

static void InitializeMemoryFlush() {
  if (flags()->flush_memory_ms == 0)
    return;
  if (flags()->flush_memory_ms < 100)
    flags()->flush_memory_ms = 100;
  internal_start_thread(&MemoryFlushThread, 0);
}

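// Maps shadow for the application range [addr, addr+size); the shadow region
// is kShadowMultiplier times larger than the application range it describes.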
void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  ctx->dead_list_size = 0;
  ctx->dead_list_head = 0;
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  // Setup correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!InitializeExternalSymbolizer(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  InitializeMemoryProfile();
  InitializeMemoryFlush();

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           GetPid());

  // Initialize thread 0.
  ctx->thread_seq = 0;
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetPid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           GetPid());
    while (__tsan_resumed == 0);
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  ctx->report_mtx.Unlock();

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
           ctx->nmissed_expected);
  }

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
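// Returns a stack depot id for the current shadow call stack.
// If pc is non-zero it is pushed temporarily so that the id also covers
// the current program counter.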
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

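// Switches the thread to a new trace part: records the starting epoch,
// call stack and mutex set in the part header so that reports can
// reconstruct the state at events stored in this part.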
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

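// Shadow cells are loaded and stored with relaxed atomic operations, so
// concurrent updates of the same cell never produce torn values.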
ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;  // Mark the value as already stored so later slots are not written.
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool BothReads(Shadow s, int kAccessIsWrite) {
  return !kAccessIsWrite && !s.is_write();
}

static inline bool OldIsRWNotWeaker(Shadow old, int kAccessIsWrite) {
  return old.is_write() || !kAccessIsWrite;
}

static inline bool OldIsRWWeakerOrEqual(Shadow old, int kAccessIsWrite) {
  return !old.is_write() || kAccessIsWrite;
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

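// The old access happens-before the current one if this thread's vector
// clock entry for the old access's thread has reached the old access's epoch.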
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This can potentially live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // Only 3 cases of access sizes are considered: equal, intersecting and
  // non-intersecting. Handling larger and smaller sizes as well would allow
  // replacing some 'candidates' with 'same' or 'replace', but it is just
  // not worth it (performance- and complexity-wise).

  Shadow old(0);
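  // The included file contains the scanning logic for a single shadow slot;
  // it is textually expanded once per slot (selected by 'idx') so that the
  // loop over slots is fully unrolled.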
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and had already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

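// Fast-path entry point for instrumented loads and stores: builds the
// current shadow value, appends the access to the trace and updates the
// shadow cells for this address via MemoryAccessImpl.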
ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
      shadow_mem, cur);
}

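// Fills shadow for the range: writes 'val' into the first shadow slot of
// each shadow cell and zeroes the remaining slots, after trimming the range
// to whole, application-mapped shadow cells.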
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  (void)thr;
  (void)pc;
  // Some programs mmap hundreds of GBs but actually use only a small part,
  // so it's better to report a false positive on that memory
  // than to hang here senselessly.
  const uptr kMaxResetSize = 4ull*1024*1024*1024;
  if (size > kMaxResetSize)
    size = kMaxResetSize;
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  u64 *p = (u64*)MemToShadow(addr);
  CHECK(IsShadowMem((uptr)p));
  CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
  // FIXME: may overwrite a part outside the region.
  for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) {
    p[i++] = val;
    for (uptr j = 1; j < kShadowCnt; j++)
      p[i++] = 0;
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryAccessRange(thr, pc, addr, size, true);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

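// FuncEntry/FuncExit maintain the per-thread shadow call stack and record
// the corresponding events in the trace.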
ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  // In the Go runtime the shadow stack is heap-allocated and grows on demand:
  // double it when full.
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

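// Begin/end of an ignore region; calls may nest, so a counter is kept and
// the per-thread ignore bit reflects whether it is non-zero.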
void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif