//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

static Context *ctx;
Context *CTX() {
  return ctx;
}

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_mtx(MutexTypeThreads, StatMtxThreads)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

ThreadContext::ThreadContext(int tid)
  : tid(tid)
  , unique_id()
  , os_id()
  , user_id()
  , thr()
  , status(ThreadStatusInvalid)
  , detached()
  , reuse_count()
  , epoch0()
  , epoch1()
  , dead_info()
  , dead_next() {
}
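
// Formats one sample of current memory consumption (shadow memory, thread
// contexts and sync objects) into buf; num is the sequence number of the
// sample.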
static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
  uptr shadow = GetShadowMemoryConsumption();

  int nthread = 0;
  int nlivethread = 0;
  uptr threadmem = 0;
  {
    Lock l(&ctx->thread_mtx);
    for (unsigned i = 0; i < kMaxTid; i++) {
      ThreadContext *tctx = ctx->threads[i];
      if (tctx == 0)
        continue;
      nthread += 1;
      threadmem += sizeof(ThreadContext);
      if (tctx->status != ThreadStatusRunning)
        continue;
      nlivethread += 1;
      threadmem += sizeof(ThreadState);
    }
  }

  uptr nsync = 0;
  uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);

  internal_snprintf(buf, buf_size, "%d: shadow=%zuMB"
                                   " thread=%zuMB(total=%d/live=%d)"
                                   " sync=%zuMB(cnt=%zu)\n",
    num,
    shadow >> 20,
    threadmem >> 20, nthread, nlivethread,
    syncmem >> 20, nsync);
}
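
// Background thread: writes one memory profile sample per second to the
// file opened by InitializeMemoryProfile() below.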
static void MemoryProfileThread(void *arg) {
  ScopedInRtl in_rtl;
  fd_t fd = (fd_t)(uptr)arg;
  for (int i = 0; ; i++) {
    InternalScopedBuffer<char> buf(4096);
    WriteMemoryProfile(buf.data(), buf.size(), i);
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    SleepForSeconds(1);
  }
}

static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuffer<char> filename(4096);
  internal_snprintf(filename.data(), filename.size(), "%s.%d",
      flags()->profile_memory, GetPid());
  fd_t fd = internal_open(filename.data(), true);
  if (fd == kInvalidFd) {
    Printf("Failed to open memory profile file '%s'\n", &filename[0]);
    Die();
  }
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}
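
// Background thread: periodically releases shadow memory back to the OS
// (period is controlled by the flush_memory_ms flag, clamped to >= 100ms).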
static void MemoryFlushThread(void *arg) {
  ScopedInRtl in_rtl;
  for (int i = 0; ; i++) {
    SleepForMillis(flags()->flush_memory_ms);
    FlushShadowMemory();
  }
}

static void InitializeMemoryFlush() {
  if (flags()->flush_memory_ms == 0)
    return;
  if (flags()->flush_memory_ms < 100)
    flags()->flush_memory_ms = 100;
  internal_start_thread(&MemoryFlushThread, 0);
}
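
// Reserves shadow memory for the application address range [addr, addr + size).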
void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  ctx->dead_list_size = 0;
  ctx->dead_list_head = 0;
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  // Set up the correct file descriptor for error reports.
  __sanitizer_set_report_fd(flags()->log_fileno);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!InitializeExternalSymbolizer(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  InitializeMemoryProfile();
  InitializeMemoryFlush();

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           GetPid());

  // Initialize thread 0.
  ctx->thread_seq = 0;
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetPid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           GetPid());
    while (__tsan_resumed == 0);
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  ctx->report_mtx.Unlock();

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif
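
// Starts a new trace part: records the current epoch and call stack in the
// part's header so that events in the part can be restored later during
// race reporting. The nomalloc counter guards against memory allocation
// on this path.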
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts;
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  thr->nomalloc--;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif
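
// Shadow cells are read and written concurrently by application threads
// without locks; relaxed atomics are used so that accesses are not torn,
// while the race-detection logic itself tolerates stale values.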

ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
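
// Saves the two conflicting shadow values and fires the race report
// (via the __tsan_report_race trampoline in C++ mode).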

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool BothReads(Shadow s, int kAccessIsWrite) {
  return !kAccessIsWrite && !s.is_write();
}

static inline bool OldIsRWNotWeaker(Shadow old, int kAccessIsWrite) {
  return old.is_write() || !kAccessIsWrite;
}

static inline bool OldIsRWWeakerOrEqual(Shadow old, int kAccessIsWrite) {
  return !old.is_write() || kAccessIsWrite;
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.tid()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well; it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).
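
  // The scan below is manually unrolled: each inclusion of
  // tsan_update_shadow_word_inl.h processes the shadow slot selected by
  // idx, and may jump to the RACE label at the end of this function.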
  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: tsan::OnMemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
      shadow_mem, cur);
}
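
// For reference, the compiler-instrumentation wrappers funnel into
// MemoryAccess() roughly like this (a simplified sketch; the actual
// wrappers are defined in tsan_interface_inl.h, included at the bottom
// of this file):
//   void __tsan_read4(void *addr) {
//     MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false);
//   }

// Sets the first shadow slot of every cell in [addr, addr + size) to val
// and clears the remaining slots; used below to reset, free-mark or
// imitate writes to a whole range at once.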
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  (void)thr;
  (void)pc;
  // Some programs mmap like hundreds of GBs but actually use a small part.
  // So, it's better to report a false positive on the memory
  // than to hang here senselessly.
  const uptr kMaxResetSize = 4ull*1024*1024*1024;
  if (size > kMaxResetSize)
    size = kMaxResetSize;
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  u64 *p = (u64*)MemToShadow(addr);
  CHECK(IsShadowMem((uptr)p));
  CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
  // FIXME: may overwrite a part outside the region
  for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) {
    p[i++] = val;
    for (uptr j = 1; j < kShadowCnt; j++)
      p[i++] = 0;
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryAccessRange(thr, pc, addr, size, true);
  Shadow s(thr->fast_state);
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
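  // In the Go runtime the shadow stack is heap-allocated,
  // so it is grown on demand rather than bounds-checked.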
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}
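
// Empty marker functions: exactly one of each group is defined per build
// configuration, and they are referenced from other translation units, so
// mixing objects compiled with mismatched settings (debug vs release,
// stats on/off, shadow word count) fails at link time.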
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif