1 //===-- tsan_interceptors.cc ----------------------------------------------===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
9 //
10 // FIXME: move as many interceptors as possible into
11 // sanitizer_common/sanitizer_common_interceptors.inc
12 //===----------------------------------------------------------------------===//
14 #include "sanitizer_common/sanitizer_atomic.h"
15 #include "sanitizer_common/sanitizer_errno.h"
16 #include "sanitizer_common/sanitizer_libc.h"
17 #include "sanitizer_common/sanitizer_linux.h"
18 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
19 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
20 #include "sanitizer_common/sanitizer_placement_new.h"
21 #include "sanitizer_common/sanitizer_posix.h"
22 #include "sanitizer_common/sanitizer_stacktrace.h"
23 #include "sanitizer_common/sanitizer_tls_get_addr.h"
24 #include "interception/interception.h"
25 #include "tsan_interceptors.h"
26 #include "tsan_interface.h"
27 #include "tsan_platform.h"
28 #include "tsan_suppressions.h"
29 #include "tsan_rtl.h"
30 #include "tsan_mman.h"
31 #include "tsan_fd.h"
34 using namespace __tsan; // NOLINT
36 #if SANITIZER_FREEBSD || SANITIZER_MAC
37 #define stdout __stdoutp
38 #define stderr __stderrp
39 #endif
41 #if SANITIZER_NETBSD
42 #define dirfd(dirp) (*(int *)(dirp))
43 #define fileno_unlocked fileno
44 #define stdout __sF[1]
45 #define stderr __sF[2]
46 #endif
48 #if SANITIZER_ANDROID
49 #define mallopt(a, b)
50 #endif
52 #ifdef __mips__
53 const int kSigCount = 129;
54 #else
55 const int kSigCount = 65;
56 #endif
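// Note: the runtime deliberately re-declares the libc/kernel types and
// constants it needs (siginfo, ucontext, signal numbers, mutex kinds, file
// lock ops, etc.) instead of including the system headers; the opaque structs
// below are sized to cover the real definitions.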
58 struct my_siginfo_t {
 59 // The size is determined from the size of the real siginfo_t on Linux.
60 u64 opaque[128 / sizeof(u64)];
63 #ifdef __mips__
64 struct ucontext_t {
65 u64 opaque[768 / sizeof(u64) + 1];
67 #else
68 struct ucontext_t {
 69 // The size is determined from the size of the real ucontext_t on Linux.
70 u64 opaque[936 / sizeof(u64) + 1];
72 #endif
74 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
75 #define PTHREAD_ABI_BASE "GLIBC_2.3.2"
76 #elif defined(__aarch64__) || SANITIZER_PPC64V2
77 #define PTHREAD_ABI_BASE "GLIBC_2.17"
78 #endif
80 extern "C" int pthread_attr_init(void *attr);
81 extern "C" int pthread_attr_destroy(void *attr);
82 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
83 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
84 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
85 extern "C" int pthread_setspecific(unsigned key, const void *v);
86 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
87 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
88 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
89 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
90 extern "C" void *pthread_self();
91 extern "C" void _exit(int status);
92 extern "C" int fileno_unlocked(void *stream);
93 #if !SANITIZER_NETBSD
94 extern "C" int dirfd(void *dirp);
95 #endif
96 #if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
97 extern "C" int mallopt(int param, int value);
98 #endif
99 #if SANITIZER_NETBSD
100 extern __sanitizer_FILE **__sF;
101 #else
102 extern __sanitizer_FILE *stdout, *stderr;
103 #endif
104 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
105 const int PTHREAD_MUTEX_RECURSIVE = 1;
106 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
107 #else
108 const int PTHREAD_MUTEX_RECURSIVE = 2;
109 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
110 #endif
111 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
112 const int EPOLL_CTL_ADD = 1;
113 #endif
114 const int SIGILL = 4;
115 const int SIGABRT = 6;
116 const int SIGFPE = 8;
117 const int SIGSEGV = 11;
118 const int SIGPIPE = 13;
119 const int SIGTERM = 15;
120 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
121 const int SIGBUS = 10;
122 const int SIGSYS = 12;
123 #else
124 const int SIGBUS = 7;
125 const int SIGSYS = 31;
126 #endif
127 void *const MAP_FAILED = (void*)-1;
128 #if SANITIZER_NETBSD
129 const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
130 #elif !SANITIZER_MAC
131 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
132 #endif
133 const int MAP_FIXED = 0x10;
134 typedef long long_t; // NOLINT
136 // From /usr/include/unistd.h
137 # define F_ULOCK 0 /* Unlock a previously locked region. */
138 # define F_LOCK 1 /* Lock a region for exclusive use. */
139 # define F_TLOCK 2 /* Test and lock a region for exclusive use. */
140 # define F_TEST 3 /* Test a region for other processes locks. */
142 typedef void (*sighandler_t)(int sig);
143 typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx);
145 #if SANITIZER_ANDROID
146 struct sigaction_t {
147 u32 sa_flags;
148 union {
149 sighandler_t sa_handler;
150 sigactionhandler_t sa_sigaction;
152 __sanitizer_sigset_t sa_mask;
153 void (*sa_restorer)();
155 #elif SANITIZER_NETBSD
156 struct sigaction_t {
157 union {
158 sighandler_t sa_handler;
159 sigactionhandler_t sa_sigaction;
161 __sanitizer_sigset_t sa_mask;
162 int sa_flags;
164 #else
165 struct sigaction_t {
166 #ifdef __mips__
167 u32 sa_flags;
168 #endif
169 union {
170 sighandler_t sa_handler;
171 sigactionhandler_t sa_sigaction;
173 #if SANITIZER_FREEBSD
174 int sa_flags;
175 __sanitizer_sigset_t sa_mask;
176 #elif SANITIZER_MAC
177 __sanitizer_sigset_t sa_mask;
178 int sa_flags;
179 #else
180 __sanitizer_sigset_t sa_mask;
181 #ifndef __mips__
182 int sa_flags;
183 #endif
184 void (*sa_restorer)();
185 #endif
187 #endif
189 const sighandler_t SIG_DFL = (sighandler_t)0;
190 const sighandler_t SIG_IGN = (sighandler_t)1;
191 const sighandler_t SIG_ERR = (sighandler_t)-1;
192 #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
193 const int SA_SIGINFO = 0x40;
194 const int SIG_SETMASK = 3;
195 #elif defined(__mips__)
196 const int SA_SIGINFO = 8;
197 const int SIG_SETMASK = 3;
198 #else
199 const int SA_SIGINFO = 4;
200 const int SIG_SETMASK = 2;
201 #endif
203 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
204 (!cur_thread()->is_inited)
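// Process-wide table of the most recently installed user signal handlers,
// indexed by signal number (read by CallUserSignalHandler() below).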
206 static sigaction_t sigactions[kSigCount];
208 namespace __tsan {
209 struct SignalDesc {
210 bool armed;
211 bool sigaction;
212 my_siginfo_t siginfo;
213 ucontext_t ctx;
216 struct ThreadSignalContext {
217 int int_signal_send;
218 atomic_uintptr_t in_blocking_func;
219 atomic_uintptr_t have_pending_signals;
220 SignalDesc pending_signals[kSigCount];
221 // emptyset and oldset are too big for stack.
222 __sanitizer_sigset_t emptyset;
223 __sanitizer_sigset_t oldset;
226 // The object is 64-byte aligned, because we want hot data to be located in
227 // a single cache line if possible (it's accessed in every interceptor).
228 static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)];
229 LibIgnore *libignore() {
230 return reinterpret_cast<LibIgnore*>(&libignore_placeholder[0]);
233 void InitializeLibIgnore() {
234 const SuppressionContext &supp = *Suppressions();
235 const uptr n = supp.SuppressionCount();
236 for (uptr i = 0; i < n; i++) {
237 const Suppression *s = supp.SuppressionAt(i);
238 if (0 == internal_strcmp(s->type, kSuppressionLib))
239 libignore()->AddIgnoredLibrary(s->templ);
241 if (flags()->ignore_noninstrumented_modules)
242 libignore()->IgnoreNoninstrumentedModules(true);
243 libignore()->OnLibraryLoaded(0);
246 } // namespace __tsan
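// Lazily allocates the per-thread signal context. Returns null for threads
// that are already dead, so late interceptor calls do not allocate.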
248 static ThreadSignalContext *SigCtx(ThreadState *thr) {
249 ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
250 if (ctx == 0 && !thr->is_dead) {
251 ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
252 MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
253 thr->signal_ctx = ctx;
255 return ctx;
258 #if !SANITIZER_MAC
259 static unsigned g_thread_finalize_key;
260 #endif
262 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
263 uptr pc)
264 : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
265 Initialize(thr);
266 if (!thr_->is_inited) return;
267 if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
268 DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
269 ignoring_ =
270 !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
271 libignore()->IsIgnored(pc, &in_ignored_lib_));
272 EnableIgnores();
275 ScopedInterceptor::~ScopedInterceptor() {
276 if (!thr_->is_inited) return;
277 DisableIgnores();
278 if (!thr_->ignore_interceptors) {
279 ProcessPendingSignals(thr_);
280 FuncExit(thr_);
281 CheckNoLocks(thr_);
285 void ScopedInterceptor::EnableIgnores() {
286 if (ignoring_) {
287 ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
288 if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
289 if (in_ignored_lib_) {
290 DCHECK(!thr_->in_ignored_lib);
291 thr_->in_ignored_lib = true;
296 void ScopedInterceptor::DisableIgnores() {
297 if (ignoring_) {
298 ThreadIgnoreEnd(thr_, pc_);
299 if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
300 if (in_ignored_lib_) {
301 DCHECK(thr_->in_ignored_lib);
302 thr_->in_ignored_lib = false;
307 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
308 #if SANITIZER_FREEBSD || SANITIZER_NETBSD
309 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
310 #else
311 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
312 #endif
314 #define READ_STRING_OF_LEN(thr, pc, s, len, n) \
315 MemoryAccessRange((thr), (pc), (uptr)(s), \
316 common_flags()->strict_string_checks ? (len) + 1 : (n), false)
318 #define READ_STRING(thr, pc, s, n) \
319 READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
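// BLOCK_REAL(name) evaluates to the real function with a temporary
// BlockingCall alive for the duration of the call: pending signals are
// drained first and the thread is marked as being inside a blocking call,
// so any further signals are delivered right away.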
321 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
323 struct BlockingCall {
324 explicit BlockingCall(ThreadState *thr)
325 : thr(thr)
326 , ctx(SigCtx(thr)) {
327 for (;;) {
328 atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
329 if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
330 break;
331 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
332 ProcessPendingSignals(thr);
334 // When we are in a "blocking call", we process signals asynchronously
335 // (right when they arrive). In this context we do not expect to be
336 // executing any user/runtime code. The known interceptor sequence when
337 // this is not true is: pthread_join -> munmap(stack). It's fine
338 // to ignore munmap in this case -- we handle stack shadow separately.
339 thr->ignore_interceptors++;
342 ~BlockingCall() {
343 thr->ignore_interceptors--;
344 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
347 ThreadState *thr;
348 ThreadSignalContext *ctx;
351 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
352 SCOPED_TSAN_INTERCEPTOR(sleep, sec);
353 unsigned res = BLOCK_REAL(sleep)(sec);
354 AfterSleep(thr, pc);
355 return res;
358 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
359 SCOPED_TSAN_INTERCEPTOR(usleep, usec);
360 int res = BLOCK_REAL(usleep)(usec);
361 AfterSleep(thr, pc);
362 return res;
365 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
366 SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
367 int res = BLOCK_REAL(nanosleep)(req, rem);
368 AfterSleep(thr, pc);
369 return res;
372 TSAN_INTERCEPTOR(int, pause) {
373 SCOPED_TSAN_INTERCEPTOR(pause);
374 return BLOCK_REAL(pause)();
377 // The sole reason tsan wraps atexit callbacks is to establish synchronization
378 // between callback setup and callback execution.
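// The Release() in setup_at_exit_wrapper() pairs with the Acquire() in
// at_exit_wrapper()/on_exit_wrapper(), so everything done before
// atexit()/on_exit() happens-before the callback.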
379 struct AtExitCtx {
380 void (*f)();
381 void *arg;
384 static void at_exit_wrapper(void *arg) {
385 ThreadState *thr = cur_thread();
386 uptr pc = 0;
387 Acquire(thr, pc, (uptr)arg);
388 AtExitCtx *ctx = (AtExitCtx*)arg;
389 ((void(*)(void *arg))ctx->f)(ctx->arg);
390 InternalFree(ctx);
393 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
394 void *arg, void *dso);
396 #if !SANITIZER_ANDROID
397 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
398 if (cur_thread()->in_symbolizer)
399 return 0;
 400   // We want to set up the atexit callback even if we are in an ignored lib
 401   // or after fork.
402 SCOPED_INTERCEPTOR_RAW(atexit, f);
403 return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
405 #endif
407 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
408 if (cur_thread()->in_symbolizer)
409 return 0;
410 SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
411 return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
414 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
415 void *arg, void *dso) {
416 AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
417 ctx->f = f;
418 ctx->arg = arg;
419 Release(thr, pc, (uptr)ctx);
 420   // Memory allocation in __cxa_atexit will race with free during exit,
 421   // because we do not see synchronization around the atexit callback list.
422 ThreadIgnoreBegin(thr, pc);
423 int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso);
424 ThreadIgnoreEnd(thr, pc);
425 return res;
428 #if !SANITIZER_MAC
429 static void on_exit_wrapper(int status, void *arg) {
430 ThreadState *thr = cur_thread();
431 uptr pc = 0;
432 Acquire(thr, pc, (uptr)arg);
433 AtExitCtx *ctx = (AtExitCtx*)arg;
434 ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
435 InternalFree(ctx);
438 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
439 if (cur_thread()->in_symbolizer)
440 return 0;
441 SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
442 AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
443 ctx->f = (void(*)())f;
444 ctx->arg = arg;
445 Release(thr, pc, (uptr)ctx);
 446   // Memory allocation here will race with free during exit,
 447   // because we do not see synchronization around the atexit callback list.
448 ThreadIgnoreBegin(thr, pc);
449 int res = REAL(on_exit)(on_exit_wrapper, ctx);
450 ThreadIgnoreEnd(thr, pc);
451 return res;
453 #endif
455 // Cleanup old bufs.
456 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
457 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
458 JmpBuf *buf = &thr->jmp_bufs[i];
459 if (buf->sp <= sp) {
460 uptr sz = thr->jmp_bufs.Size();
461 internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
462 thr->jmp_bufs.PopBack();
463 i--;
468 static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
469 if (!thr->is_inited) // called from libc guts during bootstrap
470 return;
471 // Cleanup old bufs.
472 JmpBufGarbageCollect(thr, sp);
473 // Remember the buf.
474 JmpBuf *buf = thr->jmp_bufs.PushBack();
475 buf->sp = sp;
476 buf->mangled_sp = mangled_sp;
477 buf->shadow_stack_pos = thr->shadow_stack_pos;
478 ThreadSignalContext *sctx = SigCtx(thr);
479 buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
480 buf->in_blocking_func = sctx ?
481 atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
482 false;
483 buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
484 memory_order_relaxed);
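// LongJmp() locates the matching SetJmp() record by the mangled stack pointer
// saved in the jmp_buf; the env[] slot holding it is platform-specific (see
// the #ifdef ladder below).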
487 static void LongJmp(ThreadState *thr, uptr *env) {
488 #ifdef __powerpc__
489 uptr mangled_sp = env[0];
490 #elif SANITIZER_FREEBSD || SANITIZER_NETBSD
491 uptr mangled_sp = env[2];
492 #elif SANITIZER_MAC
493 # ifdef __aarch64__
494 uptr mangled_sp = env[13];
495 # else
496 uptr mangled_sp = env[2];
497 # endif
498 #elif defined(SANITIZER_LINUX)
499 # ifdef __aarch64__
500 uptr mangled_sp = env[13];
501 # elif defined(__mips64)
502 uptr mangled_sp = env[1];
503 # else
504 uptr mangled_sp = env[6];
505 # endif
506 #endif
507 // Find the saved buf by mangled_sp.
508 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
509 JmpBuf *buf = &thr->jmp_bufs[i];
510 if (buf->mangled_sp == mangled_sp) {
511 CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
512 // Unwind the stack.
513 while (thr->shadow_stack_pos > buf->shadow_stack_pos)
514 FuncExit(thr);
515 ThreadSignalContext *sctx = SigCtx(thr);
516 if (sctx) {
517 sctx->int_signal_send = buf->int_signal_send;
518 atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
519 memory_order_relaxed);
521 atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
522 memory_order_relaxed);
523 JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
524 return;
527 Printf("ThreadSanitizer: can't find longjmp buf\n");
528 CHECK(0);
531 // FIXME: put everything below into a common extern "C" block?
532 extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
533 SetJmp(cur_thread(), sp, mangled_sp);
536 #if SANITIZER_MAC
537 TSAN_INTERCEPTOR(int, setjmp, void *env);
538 TSAN_INTERCEPTOR(int, _setjmp, void *env);
539 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
540 #else // SANITIZER_MAC
541 // Not called. Merely to satisfy TSAN_INTERCEPT().
542 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
543 int __interceptor_setjmp(void *env);
544 extern "C" int __interceptor_setjmp(void *env) {
545 CHECK(0);
546 return 0;
549 // FIXME: any reason to have a separate declaration?
550 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
551 int __interceptor__setjmp(void *env);
552 extern "C" int __interceptor__setjmp(void *env) {
553 CHECK(0);
554 return 0;
557 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
558 int __interceptor_sigsetjmp(void *env);
559 extern "C" int __interceptor_sigsetjmp(void *env) {
560 CHECK(0);
561 return 0;
564 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
565 int __interceptor___sigsetjmp(void *env);
566 extern "C" int __interceptor___sigsetjmp(void *env) {
567 CHECK(0);
568 return 0;
571 extern "C" int setjmp(void *env);
572 extern "C" int _setjmp(void *env);
573 extern "C" int sigsetjmp(void *env);
574 extern "C" int __sigsetjmp(void *env);
575 DEFINE_REAL(int, setjmp, void *env)
576 DEFINE_REAL(int, _setjmp, void *env)
577 DEFINE_REAL(int, sigsetjmp, void *env)
578 DEFINE_REAL(int, __sigsetjmp, void *env)
579 #endif // SANITIZER_MAC
581 TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
 582   // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
 583   // bad things will happen: we will jump over the ScopedInterceptor dtor and
 584   // can leave thr->in_ignored_lib set.
586 SCOPED_INTERCEPTOR_RAW(longjmp, env, val);
588 LongJmp(cur_thread(), env);
589 REAL(longjmp)(env, val);
592 TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
594 SCOPED_INTERCEPTOR_RAW(siglongjmp, env, val);
596 LongJmp(cur_thread(), env);
597 REAL(siglongjmp)(env, val);
600 #if !SANITIZER_MAC
601 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
602 if (cur_thread()->in_symbolizer)
603 return InternalAlloc(size);
604 void *p = 0;
606 SCOPED_INTERCEPTOR_RAW(malloc, size);
607 p = user_alloc(thr, pc, size);
609 invoke_malloc_hook(p, size);
610 return p;
613 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
614 SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
615 return user_memalign(thr, pc, align, sz);
618 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
619 if (cur_thread()->in_symbolizer)
620 return InternalCalloc(size, n);
621 void *p = 0;
623 SCOPED_INTERCEPTOR_RAW(calloc, size, n);
624 p = user_calloc(thr, pc, size, n);
626 invoke_malloc_hook(p, n * size);
627 return p;
630 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
631 if (cur_thread()->in_symbolizer)
632 return InternalRealloc(p, size);
633 if (p)
634 invoke_free_hook(p);
636 SCOPED_INTERCEPTOR_RAW(realloc, p, size);
637 p = user_realloc(thr, pc, p, size);
639 invoke_malloc_hook(p, size);
640 return p;
643 TSAN_INTERCEPTOR(void, free, void *p) {
644 if (p == 0)
645 return;
646 if (cur_thread()->in_symbolizer)
647 return InternalFree(p);
648 invoke_free_hook(p);
649 SCOPED_INTERCEPTOR_RAW(free, p);
650 user_free(thr, pc, p);
653 TSAN_INTERCEPTOR(void, cfree, void *p) {
654 if (p == 0)
655 return;
656 if (cur_thread()->in_symbolizer)
657 return InternalFree(p);
658 invoke_free_hook(p);
659 SCOPED_INTERCEPTOR_RAW(cfree, p);
660 user_free(thr, pc, p);
663 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
664 SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
665 return user_alloc_usable_size(p);
667 #endif
669 TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT
670 SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT
671 uptr srclen = internal_strlen(src);
672 MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
673 MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
674 return REAL(strcpy)(dst, src); // NOLINT
677 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
678 SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
679 uptr srclen = internal_strnlen(src, n);
680 MemoryAccessRange(thr, pc, (uptr)dst, n, true);
681 MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
682 return REAL(strncpy)(dst, src, n);
685 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
686 SCOPED_TSAN_INTERCEPTOR(strdup, str);
687 // strdup will call malloc, so no instrumentation is required here.
688 return REAL(strdup)(str);
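// Sanity-checks an explicit mmap address hint: if the requested range lies
// outside of application memory, MAP_FIXED requests fail with EINVAL and a
// plain hint is dropped.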
691 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
692 if (*addr) {
693 if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
694 if (flags & MAP_FIXED) {
695 errno = errno_EINVAL;
696 return false;
697 } else {
698 *addr = 0;
702 return true;
705 TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
706 int fd, OFF_T off) {
707 SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
708 if (!fix_mmap_addr(&addr, sz, flags))
709 return MAP_FAILED;
710 void *res = REAL(mmap)(addr, sz, prot, flags, fd, off);
711 if (res != MAP_FAILED) {
712 if (fd > 0)
713 FdAccess(thr, pc, fd);
715 if (thr->ignore_reads_and_writes == 0)
716 MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
717 else
718 MemoryResetRange(thr, pc, (uptr)res, sz);
720 return res;
723 #if SANITIZER_LINUX
724 TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags,
725 int fd, OFF64_T off) {
726 SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
727 if (!fix_mmap_addr(&addr, sz, flags))
728 return MAP_FAILED;
729 void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off);
730 if (res != MAP_FAILED) {
731 if (fd > 0)
732 FdAccess(thr, pc, fd);
734 if (thr->ignore_reads_and_writes == 0)
735 MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
736 else
737 MemoryResetRange(thr, pc, (uptr)res, sz);
739 return res;
741 #define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64)
742 #else
743 #define TSAN_MAYBE_INTERCEPT_MMAP64
744 #endif
746 TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
747 SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
748 if (sz != 0) {
 749     // If sz == 0, munmap returns EINVAL and does not unmap any memory.
750 DontNeedShadowFor((uptr)addr, sz);
751 ScopedGlobalProcessor sgp;
752 ctx->metamap.ResetRange(thr->proc(), (uptr)addr, (uptr)sz);
754 int res = REAL(munmap)(addr, sz);
755 return res;
758 #if SANITIZER_LINUX
759 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
760 SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
761 return user_memalign(thr, pc, align, sz);
763 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
764 #else
765 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
766 #endif
768 #if !SANITIZER_MAC
769 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
770 SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
771 return user_aligned_alloc(thr, pc, align, sz);
774 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
775 SCOPED_INTERCEPTOR_RAW(valloc, sz);
776 return user_valloc(thr, pc, sz);
778 #endif
780 #if SANITIZER_LINUX
781 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
782 SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
783 return user_pvalloc(thr, pc, sz);
785 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
786 #else
787 #define TSAN_MAYBE_INTERCEPT_PVALLOC
788 #endif
790 #if !SANITIZER_MAC
791 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
792 SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
793 return user_posix_memalign(thr, pc, memptr, align, sz);
795 #endif
 797 // __cxa_guard_acquire and friends need to be intercepted in a special way -
 798 // regular interceptors will break statically-linked libstdc++. Linux
 799 // interceptors are defined as weak functions (so that they don't cause link
 800 // errors when the user defines them as well), and they silently auto-disable
 801 // themselves when such a symbol is already present in the binary. If we link
 802 // libstdc++ statically, it will bring its own __cxa_guard_acquire, which will
 803 // silently replace our interceptor. That's why on Linux we simply export
 804 // these interceptors with INTERFACE_ATTRIBUTE.
 805 // On OS X, we don't support static linking, so we just use a regular
 806 // interceptor.
807 #if SANITIZER_MAC
808 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
809 #else
810 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
811 extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
812 #endif
 814 // Used for thread-safe initialization of function-scope statics.
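// Guard values used below: 0 - not initialized, 1<<16 - initialization in
// progress, 1 - initialized. __cxa_guard_release publishes the initialized
// state with release ordering; __cxa_guard_abort rolls it back to 0.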
815 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
816 SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
817 for (;;) {
818 u32 cmp = atomic_load(g, memory_order_acquire);
819 if (cmp == 0) {
820 if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
821 return 1;
822 } else if (cmp == 1) {
823 Acquire(thr, pc, (uptr)g);
824 return 0;
825 } else {
826 internal_sched_yield();
831 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
832 SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
833 Release(thr, pc, (uptr)g);
834 atomic_store(g, 1, memory_order_release);
837 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
838 SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
839 atomic_store(g, 0, memory_order_relaxed);
842 namespace __tsan {
843 void DestroyThreadState() {
844 ThreadState *thr = cur_thread();
845 Processor *proc = thr->proc();
846 ThreadFinish(thr);
847 ProcUnwire(proc, thr);
848 ProcDestroy(proc);
849 ThreadSignalContext *sctx = thr->signal_ctx;
850 if (sctx) {
851 thr->signal_ctx = 0;
852 UnmapOrDie(sctx, sizeof(*sctx));
854 DTLS_Destroy();
855 cur_thread_finalize();
857 } // namespace __tsan
859 #if !SANITIZER_MAC
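// Registered as a pthread TSD destructor. The value starts at
// GetPthreadDestructorIterations() and the destructor re-arms itself with a
// decremented value, so the thread state is destroyed only on the last
// destructor iteration, after other TSD destructors had a chance to run.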
860 static void thread_finalize(void *v) {
861 uptr iter = (uptr)v;
862 if (iter > 1) {
863 if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
864 Printf("ThreadSanitizer: failed to set thread key\n");
865 Die();
867 return;
869 DestroyThreadState();
871 #endif
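// Arguments handed from pthread_create() to the new thread; the tid field
// doubles as a two-way handshake flag (see the comment in pthread_create
// below).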
874 struct ThreadParam {
875 void* (*callback)(void *arg);
876 void *param;
877 atomic_uintptr_t tid;
880 extern "C" void *__tsan_thread_start_func(void *arg) {
881 ThreadParam *p = (ThreadParam*)arg;
882 void* (*callback)(void *arg) = p->callback;
883 void *param = p->param;
884 int tid = 0;
886 ThreadState *thr = cur_thread();
887 // Thread-local state is not initialized yet.
888 ScopedIgnoreInterceptors ignore;
889 #if !SANITIZER_MAC
890 ThreadIgnoreBegin(thr, 0);
891 if (pthread_setspecific(g_thread_finalize_key,
892 (void *)GetPthreadDestructorIterations())) {
893 Printf("ThreadSanitizer: failed to set thread key\n");
894 Die();
896 ThreadIgnoreEnd(thr, 0);
897 #endif
898 while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
899 internal_sched_yield();
900 Processor *proc = ProcCreate();
901 ProcWire(proc, thr);
902 ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
903 atomic_store(&p->tid, 0, memory_order_release);
905 void *res = callback(param);
 906   // Prevent the callback from being tail-called;
 907   // it mixes up stack traces.
908 volatile int foo = 42;
909 foo++;
910 return res;
913 TSAN_INTERCEPTOR(int, pthread_create,
914 void *th, void *attr, void *(*callback)(void*), void * param) {
915 SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
916 if (ctx->after_multithreaded_fork) {
917 if (flags()->die_after_fork) {
918 Report("ThreadSanitizer: starting new threads after multi-threaded "
919 "fork is not supported. Dying (set die_after_fork=0 to override)\n");
920 Die();
921 } else {
922 VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
923 "fork is not supported (pid %d). Continuing because of "
924 "die_after_fork=0, but you are on your own\n", internal_getpid());
927 __sanitizer_pthread_attr_t myattr;
928 if (attr == 0) {
929 pthread_attr_init(&myattr);
930 attr = &myattr;
932 int detached = 0;
933 REAL(pthread_attr_getdetachstate)(attr, &detached);
934 AdjustStackSize(attr);
936 ThreadParam p;
937 p.callback = callback;
938 p.param = param;
939 atomic_store(&p.tid, 0, memory_order_relaxed);
940 int res = -1;
942 // Otherwise we see false positives in pthread stack manipulation.
943 ScopedIgnoreInterceptors ignore;
944 ThreadIgnoreBegin(thr, pc);
945 res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
946 ThreadIgnoreEnd(thr, pc);
948 if (res == 0) {
949 int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
950 CHECK_NE(tid, 0);
951 // Synchronization on p.tid serves two purposes:
952 // 1. ThreadCreate must finish before the new thread starts.
953 // Otherwise the new thread can call pthread_detach, but the pthread_t
954 // identifier is not yet registered in ThreadRegistry by ThreadCreate.
955 // 2. ThreadStart must finish before this thread continues.
956 // Otherwise, this thread can call pthread_detach and reset thr->sync
 957     //    before the new thread gets a chance to acquire from it in ThreadStart.
958 atomic_store(&p.tid, tid, memory_order_release);
959 while (atomic_load(&p.tid, memory_order_acquire) != 0)
960 internal_sched_yield();
962 if (attr == &myattr)
963 pthread_attr_destroy(&myattr);
964 return res;
967 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
968 SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
969 int tid = ThreadTid(thr, pc, (uptr)th);
970 ThreadIgnoreBegin(thr, pc);
971 int res = BLOCK_REAL(pthread_join)(th, ret);
972 ThreadIgnoreEnd(thr, pc);
973 if (res == 0) {
974 ThreadJoin(thr, pc, tid);
976 return res;
979 DEFINE_REAL_PTHREAD_FUNCTIONS
981 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
982 SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
983 int tid = ThreadTid(thr, pc, (uptr)th);
984 int res = REAL(pthread_detach)(th);
985 if (res == 0) {
986 ThreadDetach(thr, pc, tid);
988 return res;
 991 // Problem:
 992 // The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
 993 // pthread_cond_t has a different size in each version.
 994 // If we call the new REAL functions for an old pthread_cond_t, they will
 995 // corrupt memory after the pthread_cond_t (the old cond is smaller).
 996 // If we call the old REAL functions for a new pthread_cond_t, we will lose
 997 // some functionality (e.g. the old functions do not support waiting against
 998 // CLOCK_REALTIME).
 999 // Proper handling would require 2 versions of the interceptors as well.
1000 // But this is messy; in particular, it requires linker scripts when the
1001 // sanitizer runtime is linked into a shared library.
1002 // Instead we assume we don't have dynamic libraries built against the old
1003 // pthread (2.2.5 dates back to 2002) and provide the legacy_pthread_cond flag
1004 // that allows working with old libraries (but this mode does not support
1005 // some features, e.g. pthread_condattr_getpshared).
1006 static void *init_cond(void *c, bool force = false) {
1007   // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
1008   // So we allocate additional memory on the side, large enough to hold
1009   // any pthread_cond_t object, always call the new REAL functions, and pass
1010   // the aux object to them.
1011   // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
1012   // the first word of pthread_cond_t to zero.
1013   // All of this is relevant only on Linux.
1014 if (!common_flags()->legacy_pthread_cond)
1015 return c;
1016 atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1017 uptr cond = atomic_load(p, memory_order_acquire);
1018 if (!force && cond != 0)
1019 return (void*)cond;
1020 void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1021 internal_memset(newcond, 0, pthread_cond_t_sz);
1022 if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1023 memory_order_acq_rel))
1024 return newcond;
1025 WRAP(free)(newcond);
1026 return (void*)cond;
1029 struct CondMutexUnlockCtx {
1030 ScopedInterceptor *si;
1031 ThreadState *thr;
1032 uptr pc;
1033 void *m;
1036 static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
1037   // The pthread_cond_wait interceptor has enabled async signal delivery
1038   // (see BlockingCall below). Disable async signals since we are running
1039   // tsan code. Also, the ScopedInterceptor and BlockingCall destructors won't
1040   // run since the thread is cancelled, so we have to execute them manually
1041   // (the thread can still run some user code due to pthread_cleanup_push).
1042 ThreadSignalContext *ctx = SigCtx(arg->thr);
1043 CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
1044 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
1045 MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
1046 // Undo BlockingCall ctor effects.
1047 arg->thr->ignore_interceptors--;
1048 arg->si->~ScopedInterceptor();
1051 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1052 void *cond = init_cond(c, true);
1053 SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1054 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1055 return REAL(pthread_cond_init)(cond, a);
1058 static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
1059 int (*fn)(void *c, void *m, void *abstime), void *c,
1060 void *m, void *t) {
1061 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1062 MutexUnlock(thr, pc, (uptr)m);
1063 CondMutexUnlockCtx arg = {si, thr, pc, m};
1064 int res = 0;
1065 // This ensures that we handle mutex lock even in case of pthread_cancel.
1066 // See test/tsan/cond_cancel.cc.
1068 // Enable signal delivery while the thread is blocked.
1069 BlockingCall bc(thr);
1070 res = call_pthread_cancel_with_cleanup(
1071 fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
1073 if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1074 MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1075 return res;
1078 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1079 void *cond = init_cond(c);
1080 SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1081 return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
1082 pthread_cond_wait),
1083 cond, m, 0);
1086 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1087 void *cond = init_cond(c);
1088 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1089 return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
1090 abstime);
1093 #if SANITIZER_MAC
1094 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1095 void *reltime) {
1096 void *cond = init_cond(c);
1097 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1098 return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
1099 m, reltime);
1101 #endif
1103 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1104 void *cond = init_cond(c);
1105 SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1106 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1107 return REAL(pthread_cond_signal)(cond);
1110 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1111 void *cond = init_cond(c);
1112 SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1113 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1114 return REAL(pthread_cond_broadcast)(cond);
1117 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1118 void *cond = init_cond(c);
1119 SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1120 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1121 int res = REAL(pthread_cond_destroy)(cond);
1122 if (common_flags()->legacy_pthread_cond) {
1123 // Free our aux cond and zero the pointer to not leave dangling pointers.
1124 WRAP(free)(cond);
1125 atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1127 return res;
1130 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1131 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1132 int res = REAL(pthread_mutex_init)(m, a);
1133 if (res == 0) {
1134 u32 flagz = 0;
1135 if (a) {
1136 int type = 0;
1137 if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1138 if (type == PTHREAD_MUTEX_RECURSIVE ||
1139 type == PTHREAD_MUTEX_RECURSIVE_NP)
1140 flagz |= MutexFlagWriteReentrant;
1142 MutexCreate(thr, pc, (uptr)m, flagz);
1144 return res;
1147 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1148 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1149 int res = REAL(pthread_mutex_destroy)(m);
1150 if (res == 0 || res == errno_EBUSY) {
1151 MutexDestroy(thr, pc, (uptr)m);
1153 return res;
1156 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1157 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1158 int res = REAL(pthread_mutex_trylock)(m);
1159 if (res == errno_EOWNERDEAD)
1160 MutexRepair(thr, pc, (uptr)m);
1161 if (res == 0 || res == errno_EOWNERDEAD)
1162 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1163 return res;
1166 #if !SANITIZER_MAC
1167 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1168 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1169 int res = REAL(pthread_mutex_timedlock)(m, abstime);
1170 if (res == 0) {
1171 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1173 return res;
1175 #endif
1177 #if !SANITIZER_MAC
1178 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1179 SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1180 int res = REAL(pthread_spin_init)(m, pshared);
1181 if (res == 0) {
1182 MutexCreate(thr, pc, (uptr)m);
1184 return res;
1187 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1188 SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1189 int res = REAL(pthread_spin_destroy)(m);
1190 if (res == 0) {
1191 MutexDestroy(thr, pc, (uptr)m);
1193 return res;
1196 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1197 SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1198 MutexPreLock(thr, pc, (uptr)m);
1199 int res = REAL(pthread_spin_lock)(m);
1200 if (res == 0) {
1201 MutexPostLock(thr, pc, (uptr)m);
1203 return res;
1206 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1207 SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1208 int res = REAL(pthread_spin_trylock)(m);
1209 if (res == 0) {
1210 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1212 return res;
1215 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1216 SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1217 MutexUnlock(thr, pc, (uptr)m);
1218 int res = REAL(pthread_spin_unlock)(m);
1219 return res;
1221 #endif
1223 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1224 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1225 int res = REAL(pthread_rwlock_init)(m, a);
1226 if (res == 0) {
1227 MutexCreate(thr, pc, (uptr)m);
1229 return res;
1232 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1233 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1234 int res = REAL(pthread_rwlock_destroy)(m);
1235 if (res == 0) {
1236 MutexDestroy(thr, pc, (uptr)m);
1238 return res;
1241 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1242 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1243 MutexPreReadLock(thr, pc, (uptr)m);
1244 int res = REAL(pthread_rwlock_rdlock)(m);
1245 if (res == 0) {
1246 MutexPostReadLock(thr, pc, (uptr)m);
1248 return res;
1251 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1252 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1253 int res = REAL(pthread_rwlock_tryrdlock)(m);
1254 if (res == 0) {
1255 MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
1257 return res;
1260 #if !SANITIZER_MAC
1261 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1262 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1263 int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1264 if (res == 0) {
1265 MutexPostReadLock(thr, pc, (uptr)m);
1267 return res;
1269 #endif
1271 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1272 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1273 MutexPreLock(thr, pc, (uptr)m);
1274 int res = REAL(pthread_rwlock_wrlock)(m);
1275 if (res == 0) {
1276 MutexPostLock(thr, pc, (uptr)m);
1278 return res;
1281 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1282 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1283 int res = REAL(pthread_rwlock_trywrlock)(m);
1284 if (res == 0) {
1285 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1287 return res;
1290 #if !SANITIZER_MAC
1291 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1292 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1293 int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1294 if (res == 0) {
1295 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1297 return res;
1299 #endif
1301 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1302 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1303 MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1304 int res = REAL(pthread_rwlock_unlock)(m);
1305 return res;
1308 #if !SANITIZER_MAC
1309 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1310 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1311 MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
1312 int res = REAL(pthread_barrier_init)(b, a, count);
1313 return res;
1316 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1317 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1318 MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
1319 int res = REAL(pthread_barrier_destroy)(b);
1320 return res;
1323 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1324 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1325 Release(thr, pc, (uptr)b);
1326 MemoryRead(thr, pc, (uptr)b, kSizeLog1);
1327 int res = REAL(pthread_barrier_wait)(b);
1328 MemoryRead(thr, pc, (uptr)b, kSizeLog1);
1329 if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1330 Acquire(thr, pc, (uptr)b);
1332 return res;
1334 #endif
1336 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1337 SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1338 if (o == 0 || f == 0)
1339 return errno_EINVAL;
1340 atomic_uint32_t *a;
1341 if (!SANITIZER_MAC)
1342 a = static_cast<atomic_uint32_t*>(o);
1343 else // On OS X, pthread_once_t has a header with a long-sized signature.
1344 a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1345 u32 v = atomic_load(a, memory_order_acquire);
1346 if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
1347 memory_order_relaxed)) {
1348 (*f)();
1349 if (!thr->in_ignored_lib)
1350 Release(thr, pc, (uptr)o);
1351 atomic_store(a, 2, memory_order_release);
1352 } else {
1353 while (v != 2) {
1354 internal_sched_yield();
1355 v = atomic_load(a, memory_order_acquire);
1357 if (!thr->in_ignored_lib)
1358 Acquire(thr, pc, (uptr)o);
1360 return 0;
1363 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1364 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1365 SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1366 if (fd > 0)
1367 FdAccess(thr, pc, fd);
1368 return REAL(__fxstat)(version, fd, buf);
1370 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
1371 #else
1372 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1373 #endif
1375 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1376 #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
1377 SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1378 if (fd > 0)
1379 FdAccess(thr, pc, fd);
1380 return REAL(fstat)(fd, buf);
1381 #else
1382 SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
1383 if (fd > 0)
1384 FdAccess(thr, pc, fd);
1385 return REAL(__fxstat)(0, fd, buf);
1386 #endif
1389 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1390 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1391 SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1392 if (fd > 0)
1393 FdAccess(thr, pc, fd);
1394 return REAL(__fxstat64)(version, fd, buf);
1396 #define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
1397 #else
1398 #define TSAN_MAYBE_INTERCEPT___FXSTAT64
1399 #endif
1401 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1402 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1403 SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
1404 if (fd > 0)
1405 FdAccess(thr, pc, fd);
1406 return REAL(__fxstat64)(0, fd, buf);
1408 #define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1409 #else
1410 #define TSAN_MAYBE_INTERCEPT_FSTAT64
1411 #endif
1413 TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
1414 SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
1415 READ_STRING(thr, pc, name, 0);
1416 int fd = REAL(open)(name, flags, mode);
1417 if (fd >= 0)
1418 FdFileCreate(thr, pc, fd);
1419 return fd;
1422 #if SANITIZER_LINUX
1423 TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
1424 SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
1425 READ_STRING(thr, pc, name, 0);
1426 int fd = REAL(open64)(name, flags, mode);
1427 if (fd >= 0)
1428 FdFileCreate(thr, pc, fd);
1429 return fd;
1431 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1432 #else
1433 #define TSAN_MAYBE_INTERCEPT_OPEN64
1434 #endif
1436 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1437 SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1438 READ_STRING(thr, pc, name, 0);
1439 int fd = REAL(creat)(name, mode);
1440 if (fd >= 0)
1441 FdFileCreate(thr, pc, fd);
1442 return fd;
1445 #if SANITIZER_LINUX
1446 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1447 SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1448 READ_STRING(thr, pc, name, 0);
1449 int fd = REAL(creat64)(name, mode);
1450 if (fd >= 0)
1451 FdFileCreate(thr, pc, fd);
1452 return fd;
1454 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1455 #else
1456 #define TSAN_MAYBE_INTERCEPT_CREAT64
1457 #endif
1459 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1460 SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1461 int newfd = REAL(dup)(oldfd);
1462 if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1463 FdDup(thr, pc, oldfd, newfd, true);
1464 return newfd;
1467 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1468 SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1469 int newfd2 = REAL(dup2)(oldfd, newfd);
1470 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1471 FdDup(thr, pc, oldfd, newfd2, false);
1472 return newfd2;
1475 #if !SANITIZER_MAC
1476 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1477 SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1478 int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1479 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1480 FdDup(thr, pc, oldfd, newfd2, false);
1481 return newfd2;
1483 #endif
1485 #if SANITIZER_LINUX
1486 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1487 SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1488 int fd = REAL(eventfd)(initval, flags);
1489 if (fd >= 0)
1490 FdEventCreate(thr, pc, fd);
1491 return fd;
1493 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1494 #else
1495 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1496 #endif
1498 #if SANITIZER_LINUX
1499 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1500 SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
1501 if (fd >= 0)
1502 FdClose(thr, pc, fd);
1503 fd = REAL(signalfd)(fd, mask, flags);
1504 if (fd >= 0)
1505 FdSignalCreate(thr, pc, fd);
1506 return fd;
1508 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1509 #else
1510 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1511 #endif
1513 #if SANITIZER_LINUX
1514 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1515 SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1516 int fd = REAL(inotify_init)(fake);
1517 if (fd >= 0)
1518 FdInotifyCreate(thr, pc, fd);
1519 return fd;
1521 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1522 #else
1523 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1524 #endif
1526 #if SANITIZER_LINUX
1527 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1528 SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1529 int fd = REAL(inotify_init1)(flags);
1530 if (fd >= 0)
1531 FdInotifyCreate(thr, pc, fd);
1532 return fd;
1534 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1535 #else
1536 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1537 #endif
1539 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1540 SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1541 int fd = REAL(socket)(domain, type, protocol);
1542 if (fd >= 0)
1543 FdSocketCreate(thr, pc, fd);
1544 return fd;
1547 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1548 SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1549 int res = REAL(socketpair)(domain, type, protocol, fd);
1550 if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1551 FdPipeCreate(thr, pc, fd[0], fd[1]);
1552 return res;
1555 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1556 SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1557 FdSocketConnecting(thr, pc, fd);
1558 int res = REAL(connect)(fd, addr, addrlen);
1559 if (res == 0 && fd >= 0)
1560 FdSocketConnect(thr, pc, fd);
1561 return res;
1564 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1565 SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1566 int res = REAL(bind)(fd, addr, addrlen);
1567 if (fd > 0 && res == 0)
1568 FdAccess(thr, pc, fd);
1569 return res;
1572 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1573 SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1574 int res = REAL(listen)(fd, backlog);
1575 if (fd > 0 && res == 0)
1576 FdAccess(thr, pc, fd);
1577 return res;
1580 TSAN_INTERCEPTOR(int, close, int fd) {
1581 SCOPED_TSAN_INTERCEPTOR(close, fd);
1582 if (fd >= 0)
1583 FdClose(thr, pc, fd);
1584 return REAL(close)(fd);
1587 #if SANITIZER_LINUX
1588 TSAN_INTERCEPTOR(int, __close, int fd) {
1589 SCOPED_TSAN_INTERCEPTOR(__close, fd);
1590 if (fd >= 0)
1591 FdClose(thr, pc, fd);
1592 return REAL(__close)(fd);
1594 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1595 #else
1596 #define TSAN_MAYBE_INTERCEPT___CLOSE
1597 #endif
1599 // glibc guts
1600 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1601 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1602 SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
1603 int fds[64];
1604 int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1605 for (int i = 0; i < cnt; i++) {
1606 if (fds[i] > 0)
1607 FdClose(thr, pc, fds[i]);
1609 REAL(__res_iclose)(state, free_addr);
1611 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1612 #else
1613 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1614 #endif
1616 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1617 SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1618 int res = REAL(pipe)(pipefd);
1619 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1620 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1621 return res;
1624 #if !SANITIZER_MAC
1625 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1626 SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1627 int res = REAL(pipe2)(pipefd, flags);
1628 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1629 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1630 return res;
1632 #endif
1634 TSAN_INTERCEPTOR(int, unlink, char *path) {
1635 SCOPED_TSAN_INTERCEPTOR(unlink, path);
1636 Release(thr, pc, File2addr(path));
1637 int res = REAL(unlink)(path);
1638 return res;
1641 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1642 SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1643 void *res = REAL(tmpfile)(fake);
1644 if (res) {
1645 int fd = fileno_unlocked(res);
1646 if (fd >= 0)
1647 FdFileCreate(thr, pc, fd);
1649 return res;
1652 #if SANITIZER_LINUX
1653 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1654 SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1655 void *res = REAL(tmpfile64)(fake);
1656 if (res) {
1657 int fd = fileno_unlocked(res);
1658 if (fd >= 0)
1659 FdFileCreate(thr, pc, fd);
1661 return res;
1663 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1664 #else
1665 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1666 #endif
1668 static void FlushStreams() {
1669 // Flushing all the streams here may freeze the process if a child thread is
1670 // performing file stream operations at the same time.
1671 REAL(fflush)(stdout);
1672 REAL(fflush)(stderr);
1675 TSAN_INTERCEPTOR(void, abort, int fake) {
1676 SCOPED_TSAN_INTERCEPTOR(abort, fake);
1677 FlushStreams();
1678 REAL(abort)(fake);
1681 TSAN_INTERCEPTOR(int, puts, const char *s) {
1682 SCOPED_TSAN_INTERCEPTOR(puts, s);
1683 MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false);
1684 return REAL(puts)(s);
1687 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1688 SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1689 Release(thr, pc, Dir2addr(path));
1690 int res = REAL(rmdir)(path);
1691 return res;
1694 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1695 SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
1696 if (dirp) {
1697 int fd = dirfd(dirp);
1698 FdClose(thr, pc, fd);
1700 return REAL(closedir)(dirp);
1703 #if SANITIZER_LINUX
1704 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1705 SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1706 int fd = REAL(epoll_create)(size);
1707 if (fd >= 0)
1708 FdPollCreate(thr, pc, fd);
1709 return fd;
1712 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1713 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1714 int fd = REAL(epoll_create1)(flags);
1715 if (fd >= 0)
1716 FdPollCreate(thr, pc, fd);
1717 return fd;
1720 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1721 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1722 if (epfd >= 0)
1723 FdAccess(thr, pc, epfd);
1724 if (epfd >= 0 && fd >= 0)
1725 FdAccess(thr, pc, fd);
1726 if (op == EPOLL_CTL_ADD && epfd >= 0)
1727 FdRelease(thr, pc, epfd);
1728 int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1729 return res;
1732 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1733 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1734 if (epfd >= 0)
1735 FdAccess(thr, pc, epfd);
1736 int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1737 if (res > 0 && epfd >= 0)
1738 FdAcquire(thr, pc, epfd);
1739 return res;
1742 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1743 void *sigmask) {
1744 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1745 if (epfd >= 0)
1746 FdAccess(thr, pc, epfd);
1747 int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1748 if (res > 0 && epfd >= 0)
1749 FdAcquire(thr, pc, epfd);
1750 return res;
1753 #define TSAN_MAYBE_INTERCEPT_EPOLL \
1754 TSAN_INTERCEPT(epoll_create); \
1755 TSAN_INTERCEPT(epoll_create1); \
1756 TSAN_INTERCEPT(epoll_ctl); \
1757 TSAN_INTERCEPT(epoll_wait); \
1758 TSAN_INTERCEPT(epoll_pwait)
1759 #else
1760 #define TSAN_MAYBE_INTERCEPT_EPOLL
1761 #endif
1763 // The following functions are intercepted merely to process pending signals.
1764 // If the program blocks signal X, we must deliver the signal before the
1765 // function returns. Similarly, if the program unblocks a signal (or returns
1766 // from sigsuspend), it's better to deliver the signal straight away.
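// Signal delivery itself happens in ProcessPendingSignals(), which runs from
// the ScopedInterceptor destructor on the way out of each of these wrappers.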
1767 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
1768 SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
1769 return REAL(sigsuspend)(mask);
1772 TSAN_INTERCEPTOR(int, sigblock, int mask) {
1773 SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
1774 return REAL(sigblock)(mask);
1777 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
1778 SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
1779 return REAL(sigsetmask)(mask);
1782 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
1783 __sanitizer_sigset_t *oldset) {
1784 SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
1785 return REAL(pthread_sigmask)(how, set, oldset);
1788 namespace __tsan {
1790 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
1791 bool sigact, int sig, my_siginfo_t *info, void *uctx) {
1792 if (acquire)
1793 Acquire(thr, 0, (uptr)&sigactions[sig]);
1794   // Signals are generally asynchronous, so if we receive a signal when
1795   // ignores are enabled we should disable ignores. This is critical for sync
1796   // and interceptors, because otherwise we can miss synchronization and report
1797   // false races.
1798 int ignore_reads_and_writes = thr->ignore_reads_and_writes;
1799 int ignore_interceptors = thr->ignore_interceptors;
1800 int ignore_sync = thr->ignore_sync;
1801 if (!ctx->after_multithreaded_fork) {
1802 thr->ignore_reads_and_writes = 0;
1803 thr->fast_state.ClearIgnoreBit();
1804 thr->ignore_interceptors = 0;
1805 thr->ignore_sync = 0;
1806 }
1807 // Ensure that the handler does not spoil errno.
1808 const int saved_errno = errno;
1809 errno = 99;
1810 // This code races with sigaction. Be careful to not read sa_sigaction twice.
1811 // Also need to remember pc for reporting before the call,
1812 // because the handler can reset it.
1813 volatile uptr pc = sigact ?
1814 (uptr)sigactions[sig].sa_sigaction :
1815 (uptr)sigactions[sig].sa_handler;
1816 if (pc != (uptr)SIG_DFL && pc != (uptr)SIG_IGN) {
1817 if (sigact)
1818 ((sigactionhandler_t)pc)(sig, info, uctx);
1819 else
1820 ((sighandler_t)pc)(sig);
1821 }
1822 if (!ctx->after_multithreaded_fork) {
1823 thr->ignore_reads_and_writes = ignore_reads_and_writes;
1824 if (ignore_reads_and_writes)
1825 thr->fast_state.SetIgnoreBit();
1826 thr->ignore_interceptors = ignore_interceptors;
1827 thr->ignore_sync = ignore_sync;
1828 }
1829 // We do not detect errno spoiling for SIGTERM,
1830 // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
1831 // and tsan would report a false positive in such cases.
1832 // It's difficult to properly detect this situation (reraise),
1833 // because in the async signal processing case (when the handler is called
1834 // directly from rtl_generic_sighandler) we have not yet received the reraised
1835 // signal; and it looks too fragile to intercept all ways to reraise a signal.
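// The check below fires when an asynchronous handler returned with errno changed
// (e.g. it called a failing libc function and did not restore errno), which can
// make the interrupted code observe a bogus error.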
1836 if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
1837 VarSizeStackTrace stack;
1838 // StackTrace::GetNextInstructionPc(pc) is used because a return address is
1839 // expected; OutputReport() will undo this.
1840 ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
1841 ThreadRegistryLock l(ctx->thread_registry);
1842 ScopedReport rep(ReportTypeErrnoInSignal);
1843 if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
1844 rep.AddStack(stack, true);
1845 OutputReport(thr, rep);
1846 }
1847 }
1848 errno = saved_errno;
1849 }
1851 void ProcessPendingSignals(ThreadState *thr) {
1852 ThreadSignalContext *sctx = SigCtx(thr);
1853 if (sctx == 0 ||
1854 atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
1855 return;
1856 atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
1857 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
1858 internal_sigfillset(&sctx->emptyset);
1859 int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
1860 CHECK_EQ(res, 0);
1861 for (int sig = 0; sig < kSigCount; sig++) {
1862 SignalDesc *signal = &sctx->pending_signals[sig];
1863 if (signal->armed) {
1864 signal->armed = false;
1865 CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
1866 &signal->siginfo, &signal->ctx);
1867 }
1868 }
1869 res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
1870 CHECK_EQ(res, 0);
1871 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
1872 }
1874 } // namespace __tsan
1876 static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
1877 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
1878 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
1879 // If we are sending a signal to ourselves, we must process it now.
1880 (sctx && sig == sctx->int_signal_send);
1883 void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
1884 my_siginfo_t *info, void *ctx) {
1885 ThreadState *thr = cur_thread();
1886 ThreadSignalContext *sctx = SigCtx(thr);
1887 if (sig < 0 || sig >= kSigCount) {
1888 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
1889 return;
1890 }
1891 // Don't mess with synchronous signals.
1892 const bool sync = is_sync_signal(sctx, sig);
1893 if (sync ||
1894 // If we are in a blocking function, we can safely process it now
1895 // (but check if we are in a recursive interceptor,
1896 // i.e. pthread_join()->munmap()).
1897 (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
1898 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
1899 if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
1900 atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
1901 CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
1902 atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
1903 } else {
1904 // Be very conservative with when we do acquire in this case.
1905 // It's unsafe to do acquire in async handlers, because ThreadState
1906 // can be in an inconsistent state.
1907 // SIGSYS looks relatively safe -- it's synchronous and can actually
1908 // need some global state.
1909 bool acq = (sig == SIGSYS);
1910 CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
1911 }
1912 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
1913 return;
1914 }
1916 if (sctx == 0)
1917 return;
1918 SignalDesc *signal = &sctx->pending_signals[sig];
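// Only one instance of each signal number is queued; further deliveries before
// ProcessPendingSignals() runs are coalesced, which mirrors the semantics of
// standard (non-realtime) POSIX signals.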
1919 if (signal->armed == false) {
1920 signal->armed = true;
1921 signal->sigaction = sigact;
1922 if (info)
1923 internal_memcpy(&signal->siginfo, info, sizeof(*info));
1924 if (ctx)
1925 internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
1926 atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
1927 }
1928 }
1930 static void rtl_sighandler(int sig) {
1931 rtl_generic_sighandler(false, sig, 0, 0);
1934 static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) {
1935 rtl_generic_sighandler(true, sig, info, ctx);
1938 TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
1939 // Note: if we call REAL(sigaction) directly for any reason without proxying
1940 // the signal handler through rtl_sigaction, very bad things will happen.
1941 // The handler will run synchronously and corrupt tsan per-thread state.
1942 SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
1943 if (old)
1944 internal_memcpy(old, &sigactions[sig], sizeof(*old));
1945 if (act == 0)
1946 return 0;
1947 // Copy act into sigactions[sig].
1948 // Can't use struct copy, because the compiler can emit a call to memcpy.
1949 // Can't use internal_memcpy, because it copies byte-by-byte,
1950 // and a signal handler can read sa_handler concurrently: it could observe
1951 // some bytes from the old value and some bytes from the new value.
1952 // Use volatile to prevent insertion of memcpy.
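// E.g. on 64-bit targets sa_handler is an 8-byte pointer; a byte-wise copy could
// let a concurrently delivered signal observe a half-updated handler address.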
1953 sigactions[sig].sa_handler = *(volatile sighandler_t*)&act->sa_handler;
1954 sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags;
1955 internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
1956 sizeof(sigactions[sig].sa_mask));
1957 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
1958 sigactions[sig].sa_restorer = act->sa_restorer;
1959 #endif
1960 sigaction_t newact;
1961 internal_memcpy(&newact, act, sizeof(newact));
1962 internal_sigfillset(&newact.sa_mask);
1963 if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) {
1964 if (newact.sa_flags & SA_SIGINFO)
1965 newact.sa_sigaction = rtl_sigaction;
1966 else
1967 newact.sa_handler = rtl_sighandler;
1968 }
1969 ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
1970 int res = REAL(sigaction)(sig, &newact, 0);
1971 return res;
1974 TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
1975 sigaction_t act;
1976 act.sa_handler = h;
1977 internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
1978 act.sa_flags = 0;
1979 sigaction_t old;
1980 int res = sigaction(sig, &act, &old);
1981 if (res)
1982 return SIG_ERR;
1983 return old.sa_handler;
1986 TSAN_INTERCEPTOR(int, raise, int sig) {
1987 SCOPED_TSAN_INTERCEPTOR(raise, sig);
1988 ThreadSignalContext *sctx = SigCtx(thr);
1989 CHECK_NE(sctx, 0);
1990 int prev = sctx->int_signal_send;
1991 sctx->int_signal_send = sig;
1992 int res = REAL(raise)(sig);
1993 CHECK_EQ(sctx->int_signal_send, sig);
1994 sctx->int_signal_send = prev;
1995 return res;
1998 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
1999 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2000 ThreadSignalContext *sctx = SigCtx(thr);
2001 CHECK_NE(sctx, 0);
2002 int prev = sctx->int_signal_send;
2003 if (pid == (int)internal_getpid()) {
2004 sctx->int_signal_send = sig;
2005 }
2006 int res = REAL(kill)(pid, sig);
2007 if (pid == (int)internal_getpid()) {
2008 CHECK_EQ(sctx->int_signal_send, sig);
2009 sctx->int_signal_send = prev;
2010 }
2011 return res;
2014 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2015 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2016 ThreadSignalContext *sctx = SigCtx(thr);
2017 CHECK_NE(sctx, 0);
2018 int prev = sctx->int_signal_send;
2019 if (tid == pthread_self()) {
2020 sctx->int_signal_send = sig;
2021 }
2022 int res = REAL(pthread_kill)(tid, sig);
2023 if (tid == pthread_self()) {
2024 CHECK_EQ(sctx->int_signal_send, sig);
2025 sctx->int_signal_send = prev;
2026 }
2027 return res;
2030 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2031 SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2032 // It's intercepted merely to process pending signals.
2033 return REAL(gettimeofday)(tv, tz);
2036 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2037 void *hints, void *rv) {
2038 SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2039 // We miss atomic synchronization in getaddrinfo,
2040 // and can report a false race between malloc and free
2041 // inside of getaddrinfo. So ignore memory accesses.
2042 ThreadIgnoreBegin(thr, pc);
2043 int res = REAL(getaddrinfo)(node, service, hints, rv);
2044 ThreadIgnoreEnd(thr, pc);
2045 return res;
2048 TSAN_INTERCEPTOR(int, fork, int fake) {
2049 if (cur_thread()->in_symbolizer)
2050 return REAL(fork)(fake);
2051 SCOPED_INTERCEPTOR_RAW(fork, fake);
2052 ForkBefore(thr, pc);
2053 int pid;
2054 {
2055 // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
2056 // we'll assert in CheckNoLocks() unless we ignore interceptors.
2057 ScopedIgnoreInterceptors ignore;
2058 pid = REAL(fork)(fake);
2059 }
2060 if (pid == 0) {
2061 // child
2062 ForkChildAfter(thr, pc);
2063 FdOnFork(thr, pc);
2064 } else if (pid > 0) {
2065 // parent
2066 ForkParentAfter(thr, pc);
2067 } else {
2068 // error
2069 ForkParentAfter(thr, pc);
2070 }
2071 return pid;
2074 TSAN_INTERCEPTOR(int, vfork, int fake) {
2075 // Some programs (e.g. openjdk) call close for all file descriptors
2076 // in the child process. Under tsan this leads to false positives, because
2077 // the address space is shared, so the parent process also thinks that
2078 // the descriptors are closed (while they actually are not), and the
2079 // synchronization on them is missed.
2080 // Strictly speaking, this is undefined behavior, because the vfork child is
2081 // not allowed to call any functions other than exec/exit. But this is what
2082 // openjdk does, so we want to handle it.
2083 // We could disable interceptors in the child process. But it's not possible
2084 // to simply intercept and wrap vfork, because vfork child is not allowed
2085 // to return from the function that calls vfork, and that's exactly what
2086 // we would do. So this would require some assembly trickery as well.
2087 // Instead we simply turn vfork into fork.
2088 return WRAP(fork)(fake);
2091 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2092 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2093 void *data);
2094 struct dl_iterate_phdr_data {
2095 ThreadState *thr;
2096 uptr pc;
2097 dl_iterate_phdr_cb_t cb;
2098 void *data;
2101 static bool IsAppNotRodata(uptr addr) {
2102 return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
2105 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2106 void *data) {
2107 dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2108 // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2109 // accessible in the dl_iterate_phdr callback. But we don't see synchronization
2110 // inside the dynamic linker, so we "unpoison" it here in order not to
2111 // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough,
2112 // because some libc functions call __libc_dlopen.
2113 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2114 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2115 internal_strlen(info->dlpi_name));
2116 int res = cbdata->cb(info, size, cbdata->data);
2117 // Perform the check one more time in case info->dlpi_name was overwritten
2118 // by user callback.
2119 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2120 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2121 internal_strlen(info->dlpi_name));
2122 return res;
2125 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2126 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2127 dl_iterate_phdr_data cbdata;
2128 cbdata.thr = thr;
2129 cbdata.pc = pc;
2130 cbdata.cb = cb;
2131 cbdata.data = data;
2132 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2133 return res;
2135 #endif
2137 static int OnExit(ThreadState *thr) {
2138 int status = Finalize(thr);
2139 FlushStreams();
2140 return status;
2143 struct TsanInterceptorContext {
2144 ThreadState *thr;
2145 const uptr caller_pc;
2146 const uptr pc;
2149 #if !SANITIZER_MAC
2150 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2151 __sanitizer_msghdr *msg) {
2152 int fds[64];
2153 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2154 for (int i = 0; i < cnt; i++)
2155 FdEventCreate(thr, pc, fds[i]);
2157 #endif
2159 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2160 // Causes interceptor recursion (getaddrinfo() and fopen())
2161 #undef SANITIZER_INTERCEPT_GETADDRINFO
2162 // These interceptors do not seem to be strictly necessary for tsan.
2163 // But we see cases where the interceptors consume 70% of execution time.
2164 // Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
2165 // First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
2166 // function "writes to" the buffer. Then, the same memory is "written to"
2167 // twice, first as buf and then as pwbufp (both of them refer to the same
2168 // addresses).
2169 #undef SANITIZER_INTERCEPT_GETPWENT
2170 #undef SANITIZER_INTERCEPT_GETPWENT_R
2171 #undef SANITIZER_INTERCEPT_FGETPWENT
2172 #undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
2173 #undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
2174 // We define our own.
2175 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2176 #define NEED_TLS_GET_ADDR
2177 #endif
2178 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2180 #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
2181 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
2182 INTERCEPT_FUNCTION_VER(name, ver)
2184 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
2185 MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
2186 ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
2187 true)
2189 #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
2190 MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
2191 ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
2192 false)
2194 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
2195 SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
2196 TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
2197 ctx = (void *)&_ctx; \
2198 (void) ctx;
2200 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2201 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
2202 TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
2203 ctx = (void *)&_ctx; \
2204 (void) ctx;
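// The TsanInterceptorContext stamped by these ENTER macros is what the other
// COMMON_INTERCEPTOR_* macros in this file unpack to recover thr and pc.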
2206 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2207 Acquire(thr, pc, File2addr(path)); \
2208 if (file) { \
2209 int fd = fileno_unlocked(file); \
2210 if (fd >= 0) FdFileCreate(thr, pc, fd); \
2211 }
2213 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2214 if (file) { \
2215 int fd = fileno_unlocked(file); \
2216 if (fd >= 0) FdClose(thr, pc, fd); \
2217 }
2219 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2220 libignore()->OnLibraryLoaded(filename)
2222 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2223 libignore()->OnLibraryUnloaded()
2225 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2226 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2228 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2229 Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2231 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2232 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2234 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2235 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2237 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2238 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2240 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2241 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2243 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2244 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2246 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2247 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2249 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2250 __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
2252 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2254 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2255 OnExit(((TsanInterceptorContext *) ctx)->thr)
2257 #define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
2258 MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
2259 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2261 #define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
2262 MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
2263 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2265 #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
2266 MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
2267 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2269 #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
2270 MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
2271 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2273 #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
2274 MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
2275 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2277 #if !SANITIZER_MAC
2278 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2279 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2280 ((TsanInterceptorContext *)ctx)->pc, msg)
2281 #endif
2283 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
2284 if (TsanThread *t = GetCurrentThread()) { \
2285 *begin = t->tls_begin(); \
2286 *end = t->tls_end(); \
2287 } else { \
2288 *begin = *end = 0; \
2289 }
2291 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2292 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2294 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2295 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2297 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2299 #define TSAN_SYSCALL() \
2300 ThreadState *thr = cur_thread(); \
2301 if (thr->ignore_interceptors) \
2302 return; \
2303 ScopedSyscall scoped_syscall(thr) \
2304 /**/
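// TSAN_SYSCALL() declares the thr used by the syscall_* helpers below, bails out
// while interceptors are ignored, and, via ScopedSyscall, processes pending
// signals when the helper returns.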
2306 struct ScopedSyscall {
2307 ThreadState *thr;
2309 explicit ScopedSyscall(ThreadState *thr)
2310 : thr(thr) {
2311 Initialize(thr);
2312 }
2314 ~ScopedSyscall() {
2315 ProcessPendingSignals(thr);
2316 }
2317 };
2319 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
2320 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2321 TSAN_SYSCALL();
2322 MemoryAccessRange(thr, pc, p, s, write);
2325 static void syscall_acquire(uptr pc, uptr addr) {
2326 TSAN_SYSCALL();
2327 Acquire(thr, pc, addr);
2328 DPrintf("syscall_acquire(%p)\n", addr);
2331 static void syscall_release(uptr pc, uptr addr) {
2332 TSAN_SYSCALL();
2333 DPrintf("syscall_release(%p)\n", addr);
2334 Release(thr, pc, addr);
2337 static void syscall_fd_close(uptr pc, int fd) {
2338 TSAN_SYSCALL();
2339 FdClose(thr, pc, fd);
2342 static USED void syscall_fd_acquire(uptr pc, int fd) {
2343 TSAN_SYSCALL();
2344 FdAcquire(thr, pc, fd);
2345 DPrintf("syscall_fd_acquire(%p)\n", fd);
2348 static USED void syscall_fd_release(uptr pc, int fd) {
2349 TSAN_SYSCALL();
2350 DPrintf("syscall_fd_release(%p)\n", fd);
2351 FdRelease(thr, pc, fd);
2354 static void syscall_pre_fork(uptr pc) {
2355 TSAN_SYSCALL();
2356 ForkBefore(thr, pc);
2359 static void syscall_post_fork(uptr pc, int pid) {
2360 TSAN_SYSCALL();
2361 if (pid == 0) {
2362 // child
2363 ForkChildAfter(thr, pc);
2364 FdOnFork(thr, pc);
2365 } else if (pid > 0) {
2366 // parent
2367 ForkParentAfter(thr, pc);
2368 } else {
2369 // error
2370 ForkParentAfter(thr, pc);
2371 }
2372 }
2373 #endif
2375 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2376 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2378 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2379 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2381 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2382 do { \
2383 (void)(p); \
2384 (void)(s); \
2385 } while (false)
2387 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2388 do { \
2389 (void)(p); \
2390 (void)(s); \
2391 } while (false)
2393 #define COMMON_SYSCALL_ACQUIRE(addr) \
2394 syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2396 #define COMMON_SYSCALL_RELEASE(addr) \
2397 syscall_release(GET_CALLER_PC(), (uptr)(addr))
2399 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2401 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2403 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2405 #define COMMON_SYSCALL_PRE_FORK() \
2406 syscall_pre_fork(GET_CALLER_PC())
2408 #define COMMON_SYSCALL_POST_FORK(res) \
2409 syscall_post_fork(GET_CALLER_PC(), res)
2411 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2413 #ifdef NEED_TLS_GET_ADDR
2414 // Define our own interceptor instead of sanitizer_common's for three reasons:
2415 // 1. It must not process pending signals.
2416 // Signal handlers may contain the MOVDQA instruction (see below).
2417 // 2. It must be as simple as possible to not contain MOVDQA.
2418 // 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE, which
2419 // is empty for tsan (meant only for msan).
2420 // Note: __tls_get_addr can be called with mis-aligned stack due to:
2421 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2422 // So the interceptor must work with a mis-aligned stack; in particular, it must
2423 // not execute MOVDQA with stack addresses.
2424 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2425 void *res = REAL(__tls_get_addr)(arg);
2426 ThreadState *thr = cur_thread();
2427 if (!thr)
2428 return res;
2429 DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr, thr->tls_size);
2430 if (!dtv)
2431 return res;
2432 // New DTLS block has been allocated.
2433 MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2434 return res;
2436 #endif
2438 namespace __tsan {
2440 static void finalize(void *arg) {
2441 ThreadState *thr = cur_thread();
2442 int status = Finalize(thr);
2443 // Make sure the output is not lost.
2444 FlushStreams();
2445 if (status)
2446 Die();
2449 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2450 static void unreachable() {
2451 Report("FATAL: ThreadSanitizer: unreachable called\n");
2452 Die();
2454 #endif
2456 void InitializeInterceptors() {
2457 #if !SANITIZER_MAC
2458 // We need to set it up early, because functions like dlsym() can call it.
2459 REAL(memset) = internal_memset;
2460 REAL(memcpy) = internal_memcpy;
2461 #endif
2463 // Instruct libc malloc to consume less memory.
2464 #if SANITIZER_LINUX
2465 mallopt(1, 0); // M_MXFAST
2466 mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
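// With glibc malloc, M_MXFAST=0 disables fastbins and M_MMAP_THRESHOLD=32K makes
// larger allocations use mmap, so freed memory is returned to the OS sooner.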
2467 #endif
2469 InitializeCommonInterceptors();
2471 #if !SANITIZER_MAC
2472 // We cannot use TSAN_INTERCEPT to get the setjmp address,
2473 // because it takes &setjmp, and setjmp is not present in some versions of libc.
2474 using __interception::GetRealFunctionAddress;
2475 GetRealFunctionAddress("setjmp", (uptr*)&REAL(setjmp), 0, 0);
2476 GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
2477 GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0);
2478 GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
2479 #endif
2481 TSAN_INTERCEPT(longjmp);
2482 TSAN_INTERCEPT(siglongjmp);
2484 TSAN_INTERCEPT(malloc);
2485 TSAN_INTERCEPT(__libc_memalign);
2486 TSAN_INTERCEPT(calloc);
2487 TSAN_INTERCEPT(realloc);
2488 TSAN_INTERCEPT(free);
2489 TSAN_INTERCEPT(cfree);
2490 TSAN_INTERCEPT(mmap);
2491 TSAN_MAYBE_INTERCEPT_MMAP64;
2492 TSAN_INTERCEPT(munmap);
2493 TSAN_MAYBE_INTERCEPT_MEMALIGN;
2494 TSAN_INTERCEPT(valloc);
2495 TSAN_MAYBE_INTERCEPT_PVALLOC;
2496 TSAN_INTERCEPT(posix_memalign);
2498 TSAN_INTERCEPT(strcpy); // NOLINT
2499 TSAN_INTERCEPT(strncpy);
2500 TSAN_INTERCEPT(strdup);
2502 TSAN_INTERCEPT(pthread_create);
2503 TSAN_INTERCEPT(pthread_join);
2504 TSAN_INTERCEPT(pthread_detach);
2506 TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2507 TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2508 TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2509 TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2510 TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2511 TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2513 TSAN_INTERCEPT(pthread_mutex_init);
2514 TSAN_INTERCEPT(pthread_mutex_destroy);
2515 TSAN_INTERCEPT(pthread_mutex_trylock);
2516 TSAN_INTERCEPT(pthread_mutex_timedlock);
2518 TSAN_INTERCEPT(pthread_spin_init);
2519 TSAN_INTERCEPT(pthread_spin_destroy);
2520 TSAN_INTERCEPT(pthread_spin_lock);
2521 TSAN_INTERCEPT(pthread_spin_trylock);
2522 TSAN_INTERCEPT(pthread_spin_unlock);
2524 TSAN_INTERCEPT(pthread_rwlock_init);
2525 TSAN_INTERCEPT(pthread_rwlock_destroy);
2526 TSAN_INTERCEPT(pthread_rwlock_rdlock);
2527 TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2528 TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2529 TSAN_INTERCEPT(pthread_rwlock_wrlock);
2530 TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2531 TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2532 TSAN_INTERCEPT(pthread_rwlock_unlock);
2534 TSAN_INTERCEPT(pthread_barrier_init);
2535 TSAN_INTERCEPT(pthread_barrier_destroy);
2536 TSAN_INTERCEPT(pthread_barrier_wait);
2538 TSAN_INTERCEPT(pthread_once);
2540 TSAN_INTERCEPT(fstat);
2541 TSAN_MAYBE_INTERCEPT___FXSTAT;
2542 TSAN_MAYBE_INTERCEPT_FSTAT64;
2543 TSAN_MAYBE_INTERCEPT___FXSTAT64;
2544 TSAN_INTERCEPT(open);
2545 TSAN_MAYBE_INTERCEPT_OPEN64;
2546 TSAN_INTERCEPT(creat);
2547 TSAN_MAYBE_INTERCEPT_CREAT64;
2548 TSAN_INTERCEPT(dup);
2549 TSAN_INTERCEPT(dup2);
2550 TSAN_INTERCEPT(dup3);
2551 TSAN_MAYBE_INTERCEPT_EVENTFD;
2552 TSAN_MAYBE_INTERCEPT_SIGNALFD;
2553 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2554 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2555 TSAN_INTERCEPT(socket);
2556 TSAN_INTERCEPT(socketpair);
2557 TSAN_INTERCEPT(connect);
2558 TSAN_INTERCEPT(bind);
2559 TSAN_INTERCEPT(listen);
2560 TSAN_MAYBE_INTERCEPT_EPOLL;
2561 TSAN_INTERCEPT(close);
2562 TSAN_MAYBE_INTERCEPT___CLOSE;
2563 TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2564 TSAN_INTERCEPT(pipe);
2565 TSAN_INTERCEPT(pipe2);
2567 TSAN_INTERCEPT(unlink);
2568 TSAN_INTERCEPT(tmpfile);
2569 TSAN_MAYBE_INTERCEPT_TMPFILE64;
2570 TSAN_INTERCEPT(fread);
2571 TSAN_INTERCEPT(fwrite);
2572 TSAN_INTERCEPT(abort);
2573 TSAN_INTERCEPT(puts);
2574 TSAN_INTERCEPT(rmdir);
2575 TSAN_INTERCEPT(closedir);
2577 TSAN_INTERCEPT(sigaction);
2578 TSAN_INTERCEPT(signal);
2579 TSAN_INTERCEPT(sigsuspend);
2580 TSAN_INTERCEPT(sigblock);
2581 TSAN_INTERCEPT(sigsetmask);
2582 TSAN_INTERCEPT(pthread_sigmask);
2583 TSAN_INTERCEPT(raise);
2584 TSAN_INTERCEPT(kill);
2585 TSAN_INTERCEPT(pthread_kill);
2586 TSAN_INTERCEPT(sleep);
2587 TSAN_INTERCEPT(usleep);
2588 TSAN_INTERCEPT(nanosleep);
2589 TSAN_INTERCEPT(pause);
2590 TSAN_INTERCEPT(gettimeofday);
2591 TSAN_INTERCEPT(getaddrinfo);
2593 TSAN_INTERCEPT(fork);
2594 TSAN_INTERCEPT(vfork);
2595 #if !SANITIZER_ANDROID
2596 TSAN_INTERCEPT(dl_iterate_phdr);
2597 #endif
2598 TSAN_INTERCEPT(on_exit);
2599 TSAN_INTERCEPT(__cxa_atexit);
2600 TSAN_INTERCEPT(_exit);
2602 #ifdef NEED_TLS_GET_ADDR
2603 TSAN_INTERCEPT(__tls_get_addr);
2604 #endif
2606 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2607 // We need to set it up, because interceptors check that the function is resolved.
2608 // But atexit is emitted directly into the module, so it can't be resolved.
2609 REAL(atexit) = (int(*)(void(*)()))unreachable;
2610 #endif
2612 if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
2613 Printf("ThreadSanitizer: failed to setup atexit callback\n");
2614 Die();
2615 }
2617 #if !SANITIZER_MAC
2618 if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
2619 Printf("ThreadSanitizer: failed to create thread key\n");
2620 Die();
2621 }
2622 #endif
2624 FdInit();
2625 }
2627 } // namespace __tsan
2629 // Invisible barrier for tests.
2630 // There were several unsuccessful iterations for this functionality:
2631 // 1. Initially it was implemented in user code using
2632 // REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
2633 // MacOS. Futexes, for that matter, are Linux-specific.
2634 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
2635 // "as-if synchronized via sleep" messages in reports which failed some
2636 // output tests.
2637 // 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
2638 // visible events, which led to "failed to restore stack trace" failures.
2639 // Note that the no_sanitize_thread attribute does not turn off atomic interception,
2640 // so attaching it to a function defined in user code does not help.
2641 // That's why we now have what we have.
2642 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2643 void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
2644 if (count >= (1 << 8)) {
2645 Printf("barrier_init: count is too large (%d)\n", count);
2646 Die();
2647 }
2648 // The 8 LSBs hold the thread count; the remaining bits count the threads that have entered.
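// For example, with count == 4 the barrier starts at 0x004; once all four
// threads have called barrier_wait one time it reads 0x404, i.e. the epoch
// (entered / count) has advanced from 0 to 1.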
2649 *barrier = count;
2650 }
2652 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2653 void __tsan_testonly_barrier_wait(u64 *barrier) {
2654 unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
2655 unsigned old_epoch = (old >> 8) / (old & 0xff);
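// old_epoch is the number of complete rounds observed on entry (total entries
// divided by the thread count); the loop below spins until that epoch advances,
// i.e. until all count threads of the current round have entered.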
2656 for (;;) {
2657 unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
2658 unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
2659 if (cur_epoch != old_epoch)
2660 return;
2661 internal_sched_yield();