1 //===-- tsan_interceptors.cc ----------------------------------------------===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
9 //
10 // FIXME: move as many interceptors as possible into
11 // sanitizer_common/sanitizer_common_interceptors.inc
12 //===----------------------------------------------------------------------===//
14 #include "sanitizer_common/sanitizer_atomic.h"
15 #include "sanitizer_common/sanitizer_errno.h"
16 #include "sanitizer_common/sanitizer_libc.h"
17 #include "sanitizer_common/sanitizer_linux.h"
18 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
19 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
20 #include "sanitizer_common/sanitizer_placement_new.h"
21 #include "sanitizer_common/sanitizer_posix.h"
22 #include "sanitizer_common/sanitizer_stacktrace.h"
23 #include "sanitizer_common/sanitizer_tls_get_addr.h"
24 #include "interception/interception.h"
25 #include "tsan_interceptors.h"
26 #include "tsan_interface.h"
27 #include "tsan_platform.h"
28 #include "tsan_suppressions.h"
29 #include "tsan_rtl.h"
30 #include "tsan_mman.h"
31 #include "tsan_fd.h"
34 using namespace __tsan; // NOLINT
36 #if SANITIZER_FREEBSD || SANITIZER_MAC
37 #define stdout __stdoutp
38 #define stderr __stderrp
39 #endif
41 #if SANITIZER_NETBSD
42 #define dirfd(dirp) (*(int *)(dirp))
43 #define fileno_unlocked fileno
45 #if _LP64
46 #define __sF_size 152
47 #else
48 #define __sF_size 88
49 #endif
51 #define stdout ((char*)&__sF + (__sF_size * 1))
52 #define stderr ((char*)&__sF + (__sF_size * 2))
54 #endif
56 #if SANITIZER_ANDROID
57 #define mallopt(a, b)
58 #endif
60 #ifdef __mips__
61 const int kSigCount = 129;
62 #else
63 const int kSigCount = 65;
64 #endif
66 #ifdef __mips__
struct ucontext_t {
  u64 opaque[768 / sizeof(u64) + 1];
};
#else
struct ucontext_t {
  // The size is determined by looking at sizeof of the real ucontext_t on Linux.
  u64 opaque[936 / sizeof(u64) + 1];
};
#endif
77 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
78 #define PTHREAD_ABI_BASE "GLIBC_2.3.2"
79 #elif defined(__aarch64__) || SANITIZER_PPC64V2
80 #define PTHREAD_ABI_BASE "GLIBC_2.17"
81 #endif
83 extern "C" int pthread_attr_init(void *attr);
84 extern "C" int pthread_attr_destroy(void *attr);
85 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
86 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
87 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
88 extern "C" int pthread_setspecific(unsigned key, const void *v);
89 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
90 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
91 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
92 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
93 extern "C" void *pthread_self();
94 extern "C" void _exit(int status);
95 extern "C" int fileno_unlocked(void *stream);
96 #if !SANITIZER_NETBSD
97 extern "C" int dirfd(void *dirp);
98 #endif
99 #if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
100 extern "C" int mallopt(int param, int value);
101 #endif
102 #if SANITIZER_NETBSD
103 extern __sanitizer_FILE __sF[];
104 #else
105 extern __sanitizer_FILE *stdout, *stderr;
106 #endif
107 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
108 const int PTHREAD_MUTEX_RECURSIVE = 1;
109 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
110 #else
111 const int PTHREAD_MUTEX_RECURSIVE = 2;
112 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
113 #endif
114 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
115 const int EPOLL_CTL_ADD = 1;
116 #endif
117 const int SIGILL = 4;
118 const int SIGABRT = 6;
119 const int SIGFPE = 8;
120 const int SIGSEGV = 11;
121 const int SIGPIPE = 13;
122 const int SIGTERM = 15;
123 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
124 const int SIGBUS = 10;
125 const int SIGSYS = 12;
126 #else
127 const int SIGBUS = 7;
128 const int SIGSYS = 31;
129 #endif
130 void *const MAP_FAILED = (void*)-1;
131 #if SANITIZER_NETBSD
132 const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
133 #elif !SANITIZER_MAC
134 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
135 #endif
136 const int MAP_FIXED = 0x10;
137 typedef long long_t; // NOLINT
139 // From /usr/include/unistd.h
140 # define F_ULOCK 0 /* Unlock a previously locked region. */
141 # define F_LOCK 1 /* Lock a region for exclusive use. */
142 # define F_TLOCK 2 /* Test and lock a region for exclusive use. */
143 # define F_TEST 3 /* Test a region for other processes locks. */
145 #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
146 const int SA_SIGINFO = 0x40;
147 const int SIG_SETMASK = 3;
148 #elif defined(__mips__)
149 const int SA_SIGINFO = 8;
150 const int SIG_SETMASK = 3;
151 #else
152 const int SA_SIGINFO = 4;
153 const int SIG_SETMASK = 2;
154 #endif
156 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
157 (!cur_thread()->is_inited)
159 namespace __tsan {
struct SignalDesc {
  bool armed;
  bool sigaction;
  __sanitizer_siginfo siginfo;
  ucontext_t ctx;
};

struct ThreadSignalContext {
  int int_signal_send;
  atomic_uintptr_t in_blocking_func;
  atomic_uintptr_t have_pending_signals;
  SignalDesc pending_signals[kSigCount];
  // emptyset and oldset are too big for the stack.
  __sanitizer_sigset_t emptyset;
  __sanitizer_sigset_t oldset;
};

// The sole reason tsan wraps atexit callbacks is to establish synchronization
// between callback setup and callback execution.
struct AtExitCtx {
  void (*f)();
  void *arg;
};
184 // InterceptorContext holds all global data required for interceptors.
185 // It's explicitly constructed in InitializeInterceptors with placement new
186 // and is never destroyed. This allows usage of members with non-trivial
187 // constructors and destructors.
struct InterceptorContext {
  // The object is 64-byte aligned, because we want hot data to be located
  // in a single cache line if possible (it's accessed in every interceptor).
  ALIGNED(64) LibIgnore libignore;
  __sanitizer_sigaction sigactions[kSigCount];
#if !SANITIZER_MAC && !SANITIZER_NETBSD
  unsigned finalize_key;
#endif

  BlockingMutex atexit_mu;
  Vector<struct AtExitCtx *> AtExitStack;

  InterceptorContext()
      : libignore(LINKER_INITIALIZED), AtExitStack() {
  }
};
205 static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
206 InterceptorContext *interceptor_ctx() {
207 return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
210 LibIgnore *libignore() {
211 return &interceptor_ctx()->libignore;
214 void InitializeLibIgnore() {
215 const SuppressionContext &supp = *Suppressions();
216 const uptr n = supp.SuppressionCount();
217 for (uptr i = 0; i < n; i++) {
218 const Suppression *s = supp.SuppressionAt(i);
219 if (0 == internal_strcmp(s->type, kSuppressionLib))
220 libignore()->AddIgnoredLibrary(s->templ);
222 if (flags()->ignore_noninstrumented_modules)
223 libignore()->IgnoreNoninstrumentedModules(true);
224 libignore()->OnLibraryLoaded(0);
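  // Libraries are matched against "called_from_lib" (kSuppressionLib)
  // suppressions. For example, a suppressions file would typically contain a
  // line such as
  //   called_from_lib:libexample.so
  // (library name purely illustrative); all memory accesses performed from
  // inside a matched library are then ignored by the race detector.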
227 } // namespace __tsan
229 static ThreadSignalContext *SigCtx(ThreadState *thr) {
230 ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
231 if (ctx == 0 && !thr->is_dead) {
232 ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
233 MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
234 thr->signal_ctx = ctx;
236 return ctx;
239 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
240 uptr pc)
241 : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
242 Initialize(thr);
243 if (!thr_->is_inited) return;
244 if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
245 DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
246 ignoring_ =
247 !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
248 libignore()->IsIgnored(pc, &in_ignored_lib_));
249 EnableIgnores();
252 ScopedInterceptor::~ScopedInterceptor() {
253 if (!thr_->is_inited) return;
254 DisableIgnores();
255 if (!thr_->ignore_interceptors) {
256 ProcessPendingSignals(thr_);
257 FuncExit(thr_);
258 CheckNoLocks(thr_);
262 void ScopedInterceptor::EnableIgnores() {
263 if (ignoring_) {
264 ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
265 if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
266 if (in_ignored_lib_) {
267 DCHECK(!thr_->in_ignored_lib);
268 thr_->in_ignored_lib = true;
273 void ScopedInterceptor::DisableIgnores() {
274 if (ignoring_) {
275 ThreadIgnoreEnd(thr_, pc_);
276 if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
277 if (in_ignored_lib_) {
278 DCHECK(thr_->in_ignored_lib);
279 thr_->in_ignored_lib = false;
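// Every interceptor below opens with SCOPED_TSAN_INTERCEPTOR(func, ...) or
// SCOPED_INTERCEPTOR_RAW(func, ...) (see tsan_interceptors.h). These macros
// bind the current ThreadState as `thr`, compute a `pc`, and construct a
// ScopedInterceptor, so FuncEntry/FuncExit, library-ignore handling and
// pending-signal processing automatically bracket the real call.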
284 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
285 #if SANITIZER_FREEBSD
286 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
287 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
288 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
289 #elif SANITIZER_NETBSD
290 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
291 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
292 INTERCEPT_FUNCTION(__libc_##func)
293 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
294 INTERCEPT_FUNCTION(__libc_thr_##func)
295 #else
296 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
297 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
298 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
299 #endif
301 #define READ_STRING_OF_LEN(thr, pc, s, len, n) \
302 MemoryAccessRange((thr), (pc), (uptr)(s), \
303 common_flags()->strict_string_checks ? (len) + 1 : (n), false)
305 #define READ_STRING(thr, pc, s, n) \
306 READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
308 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
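// BLOCK_REAL(name) expands to REAL(name), but first constructs a temporary
// BlockingCall via the comma operator. The temporary lives until the end of
// the full expression, i.e. across the real call itself, so pending signals
// are drained before blocking and asynchronous delivery stays enabled while
// the thread is blocked.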
310 struct BlockingCall {
311 explicit BlockingCall(ThreadState *thr)
312 : thr(thr)
313 , ctx(SigCtx(thr)) {
314 for (;;) {
315 atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
316 if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
317 break;
318 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
319 ProcessPendingSignals(thr);
321 // When we are in a "blocking call", we process signals asynchronously
322 // (right when they arrive). In this context we do not expect to be
323 // executing any user/runtime code. The known interceptor sequence when
324 // this is not true is: pthread_join -> munmap(stack). It's fine
325 // to ignore munmap in this case -- we handle stack shadow separately.
326 thr->ignore_interceptors++;
329 ~BlockingCall() {
330 thr->ignore_interceptors--;
331 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
334 ThreadState *thr;
335 ThreadSignalContext *ctx;
338 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
339 SCOPED_TSAN_INTERCEPTOR(sleep, sec);
340 unsigned res = BLOCK_REAL(sleep)(sec);
341 AfterSleep(thr, pc);
342 return res;
345 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
346 SCOPED_TSAN_INTERCEPTOR(usleep, usec);
347 int res = BLOCK_REAL(usleep)(usec);
348 AfterSleep(thr, pc);
349 return res;
352 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
353 SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
354 int res = BLOCK_REAL(nanosleep)(req, rem);
355 AfterSleep(thr, pc);
356 return res;
359 TSAN_INTERCEPTOR(int, pause, int fake) {
360 SCOPED_TSAN_INTERCEPTOR(pause, fake);
361 return BLOCK_REAL(pause)(fake);
364 static void at_exit_wrapper() {
365 AtExitCtx *ctx;
367 // Ensure thread-safety.
368 BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
370 // Pop AtExitCtx from the top of the stack of callback functions
371 uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
372 ctx = interceptor_ctx()->AtExitStack[element];
373 interceptor_ctx()->AtExitStack.PopBack();
376 Acquire(cur_thread(), (uptr)0, (uptr)ctx);
377 ((void(*)())ctx->f)();
378 InternalFree(ctx);
381 static void cxa_at_exit_wrapper(void *arg) {
382 Acquire(cur_thread(), 0, (uptr)arg);
383 AtExitCtx *ctx = (AtExitCtx*)arg;
384 ((void(*)(void *arg))ctx->f)(ctx->arg);
385 InternalFree(ctx);
388 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
389 void *arg, void *dso);
391 #if !SANITIZER_ANDROID
392 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
393 if (UNLIKELY(cur_thread()->in_symbolizer))
394 return 0;
395 // We want to setup the atexit callback even if we are in ignored lib
396 // or after fork.
397 SCOPED_INTERCEPTOR_RAW(atexit, f);
398 return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
400 #endif
402 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
403 if (UNLIKELY(cur_thread()->in_symbolizer))
404 return 0;
405 SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
406 return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
409 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
410 void *arg, void *dso) {
411 AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
412 ctx->f = f;
413 ctx->arg = arg;
414 Release(thr, pc, (uptr)ctx);
415 // Memory allocation in __cxa_atexit will race with free during exit,
416 // because we do not see synchronization around atexit callback list.
417 ThreadIgnoreBegin(thr, pc);
418 int res;
419 if (!dso) {
420 // NetBSD does not preserve the 2nd argument if dso is equal to 0
421 // Store ctx in a local stack-like structure
423 // Ensure thread-safety.
424 BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
426 res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
427 // Push AtExitCtx on the top of the stack of callback functions
428 if (!res) {
429 interceptor_ctx()->AtExitStack.PushBack(ctx);
431 } else {
432 res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
434 ThreadIgnoreEnd(thr, pc);
435 return res;
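// The Release() on ctx in setup_at_exit_wrapper() above pairs with the
// Acquire() on the same address in at_exit_wrapper()/cxa_at_exit_wrapper(),
// so everything the registering thread did before atexit()/__cxa_atexit()
// happens-before the callback body. Roughly:
//   registration:                      exit time (wrapper):
//     ctx->f = f; ctx->arg = arg;        Acquire(thr, 0, (uptr)ctx);
//     Release(thr, pc, (uptr)ctx);       ((void (*)())ctx->f)();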
438 #if !SANITIZER_MAC && !SANITIZER_NETBSD
439 static void on_exit_wrapper(int status, void *arg) {
440 ThreadState *thr = cur_thread();
441 uptr pc = 0;
442 Acquire(thr, pc, (uptr)arg);
443 AtExitCtx *ctx = (AtExitCtx*)arg;
444 ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
445 InternalFree(ctx);
448 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
449 if (UNLIKELY(cur_thread()->in_symbolizer))
450 return 0;
451 SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
452 AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
453 ctx->f = (void(*)())f;
454 ctx->arg = arg;
455 Release(thr, pc, (uptr)ctx);
456 // Memory allocation in __cxa_atexit will race with free during exit,
457 // because we do not see synchronization around atexit callback list.
458 ThreadIgnoreBegin(thr, pc);
459 int res = REAL(on_exit)(on_exit_wrapper, ctx);
460 ThreadIgnoreEnd(thr, pc);
461 return res;
463 #define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
464 #else
465 #define TSAN_MAYBE_INTERCEPT_ON_EXIT
466 #endif
468 // Cleanup old bufs.
469 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
470 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
471 JmpBuf *buf = &thr->jmp_bufs[i];
472 if (buf->sp <= sp) {
473 uptr sz = thr->jmp_bufs.Size();
474 internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
475 thr->jmp_bufs.PopBack();
476 i--;
481 static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
482 if (!thr->is_inited) // called from libc guts during bootstrap
483 return;
484 // Cleanup old bufs.
485 JmpBufGarbageCollect(thr, sp);
486 // Remember the buf.
487 JmpBuf *buf = thr->jmp_bufs.PushBack();
488 buf->sp = sp;
489 buf->mangled_sp = mangled_sp;
490 buf->shadow_stack_pos = thr->shadow_stack_pos;
491 ThreadSignalContext *sctx = SigCtx(thr);
492 buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
493 buf->in_blocking_func = sctx ?
494 atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
495 false;
496 buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
497 memory_order_relaxed);
500 static void LongJmp(ThreadState *thr, uptr *env) {
501 #ifdef __powerpc__
502 uptr mangled_sp = env[0];
503 #elif SANITIZER_FREEBSD
504 uptr mangled_sp = env[2];
505 #elif SANITIZER_NETBSD
506 uptr mangled_sp = env[6];
507 #elif SANITIZER_MAC
508 # ifdef __aarch64__
509 uptr mangled_sp =
510 (GetMacosVersion() >= MACOS_VERSION_MOJAVE) ? env[12] : env[13];
511 # else
512 uptr mangled_sp = env[2];
513 # endif
514 #elif SANITIZER_LINUX
515 # ifdef __aarch64__
516 uptr mangled_sp = env[13];
517 # elif defined(__mips64)
518 uptr mangled_sp = env[1];
519 # else
520 uptr mangled_sp = env[6];
521 # endif
522 #endif
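  // The env[] indices above select the jmp_buf slot in which each platform's
  // libc stores the (possibly mangled) stack pointer, so they have to match
  // that platform's jmp_buf layout.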
523 // Find the saved buf by mangled_sp.
524 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
525 JmpBuf *buf = &thr->jmp_bufs[i];
526 if (buf->mangled_sp == mangled_sp) {
527 CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
528 // Unwind the stack.
529 while (thr->shadow_stack_pos > buf->shadow_stack_pos)
530 FuncExit(thr);
531 ThreadSignalContext *sctx = SigCtx(thr);
532 if (sctx) {
533 sctx->int_signal_send = buf->int_signal_send;
534 atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
535 memory_order_relaxed);
537 atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
538 memory_order_relaxed);
539 JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
540 return;
543 Printf("ThreadSanitizer: can't find longjmp buf\n");
544 CHECK(0);
547 // FIXME: put everything below into a common extern "C" block?
548 extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
549 SetJmp(cur_thread(), sp, mangled_sp);
552 #if SANITIZER_MAC
553 TSAN_INTERCEPTOR(int, setjmp, void *env);
554 TSAN_INTERCEPTOR(int, _setjmp, void *env);
555 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
556 #else // SANITIZER_MAC
558 #if SANITIZER_NETBSD
559 #define setjmp_symname __setjmp14
560 #define sigsetjmp_symname __sigsetjmp14
561 #else
562 #define setjmp_symname setjmp
563 #define sigsetjmp_symname sigsetjmp
564 #endif
566 #define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
567 #define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
568 #define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
569 #define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
571 #define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
572 #define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
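// The two-level TSAN_INTERCEPTOR_SETJMP_/TSAN_INTERCEPTOR_SETJMP__ indirection
// forces setjmp_symname/sigsetjmp_symname to be macro-expanded (e.g. to
// __setjmp14 on NetBSD) before the ## paste with the __interceptor_ prefix;
// a single-level macro would paste the unexpanded parameter name instead.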
574 // Not called. Merely to satisfy TSAN_INTERCEPT().
575 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
576 int TSAN_INTERCEPTOR_SETJMP(void *env);
577 extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
578 CHECK(0);
579 return 0;
582 // FIXME: any reason to have a separate declaration?
583 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
584 int __interceptor__setjmp(void *env);
585 extern "C" int __interceptor__setjmp(void *env) {
586 CHECK(0);
587 return 0;
590 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
591 int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
592 extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
593 CHECK(0);
594 return 0;
597 #if !SANITIZER_NETBSD
598 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
599 int __interceptor___sigsetjmp(void *env);
600 extern "C" int __interceptor___sigsetjmp(void *env) {
601 CHECK(0);
602 return 0;
604 #endif
606 extern "C" int setjmp_symname(void *env);
607 extern "C" int _setjmp(void *env);
608 extern "C" int sigsetjmp_symname(void *env);
609 #if !SANITIZER_NETBSD
610 extern "C" int __sigsetjmp(void *env);
611 #endif
612 DEFINE_REAL(int, setjmp_symname, void *env)
613 DEFINE_REAL(int, _setjmp, void *env)
614 DEFINE_REAL(int, sigsetjmp_symname, void *env)
615 #if !SANITIZER_NETBSD
616 DEFINE_REAL(int, __sigsetjmp, void *env)
617 #endif
618 #endif // SANITIZER_MAC
620 #if SANITIZER_NETBSD
621 #define longjmp_symname __longjmp14
622 #define siglongjmp_symname __siglongjmp14
623 #else
624 #define longjmp_symname longjmp
625 #define siglongjmp_symname siglongjmp
626 #endif
628 TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
  // Note: if we called REAL(longjmp) in the context of ScopedInterceptor,
  // bad things would happen. We would jump over the ScopedInterceptor dtor
  // and could leave thr->in_ignored_lib set.
633 SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
635 LongJmp(cur_thread(), env);
636 REAL(longjmp_symname)(env, val);
639 TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
641 SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
643 LongJmp(cur_thread(), env);
644 REAL(siglongjmp_symname)(env, val);
647 #if SANITIZER_NETBSD
648 TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
650 SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
652 LongJmp(cur_thread(), env);
653 REAL(_longjmp)(env, val);
655 #endif
657 #if !SANITIZER_MAC
658 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
659 if (UNLIKELY(cur_thread()->in_symbolizer))
660 return InternalAlloc(size);
661 void *p = 0;
663 SCOPED_INTERCEPTOR_RAW(malloc, size);
664 p = user_alloc(thr, pc, size);
666 invoke_malloc_hook(p, size);
667 return p;
670 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
671 SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
672 return user_memalign(thr, pc, align, sz);
675 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
676 if (UNLIKELY(cur_thread()->in_symbolizer))
677 return InternalCalloc(size, n);
678 void *p = 0;
680 SCOPED_INTERCEPTOR_RAW(calloc, size, n);
681 p = user_calloc(thr, pc, size, n);
683 invoke_malloc_hook(p, n * size);
684 return p;
687 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
688 if (UNLIKELY(cur_thread()->in_symbolizer))
689 return InternalRealloc(p, size);
690 if (p)
691 invoke_free_hook(p);
693 SCOPED_INTERCEPTOR_RAW(realloc, p, size);
694 p = user_realloc(thr, pc, p, size);
696 invoke_malloc_hook(p, size);
697 return p;
700 TSAN_INTERCEPTOR(void, free, void *p) {
701 if (p == 0)
702 return;
703 if (UNLIKELY(cur_thread()->in_symbolizer))
704 return InternalFree(p);
705 invoke_free_hook(p);
706 SCOPED_INTERCEPTOR_RAW(free, p);
707 user_free(thr, pc, p);
710 TSAN_INTERCEPTOR(void, cfree, void *p) {
711 if (p == 0)
712 return;
713 if (UNLIKELY(cur_thread()->in_symbolizer))
714 return InternalFree(p);
715 invoke_free_hook(p);
716 SCOPED_INTERCEPTOR_RAW(cfree, p);
717 user_free(thr, pc, p);
720 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
721 SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
722 return user_alloc_usable_size(p);
724 #endif
726 TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT
727 SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT
728 uptr srclen = internal_strlen(src);
729 MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
730 MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
731 return REAL(strcpy)(dst, src); // NOLINT
734 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
735 SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
736 uptr srclen = internal_strnlen(src, n);
737 MemoryAccessRange(thr, pc, (uptr)dst, n, true);
738 MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
739 return REAL(strncpy)(dst, src, n);
742 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
743 SCOPED_TSAN_INTERCEPTOR(strdup, str);
744 // strdup will call malloc, so no instrumentation is required here.
745 return REAL(strdup)(str);
748 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
749 if (*addr) {
750 if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
751 if (flags & MAP_FIXED) {
752 errno = errno_EINVAL;
753 return false;
754 } else {
755 *addr = 0;
759 return true;
762 template <class Mmap>
763 static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
764 void *addr, SIZE_T sz, int prot, int flags,
765 int fd, OFF64_T off) {
766 if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
767 void *res = real_mmap(addr, sz, prot, flags, fd, off);
768 if (res != MAP_FAILED) {
769 if (fd > 0) FdAccess(thr, pc, fd);
770 if (thr->ignore_reads_and_writes == 0)
771 MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
772 else
773 MemoryResetRange(thr, pc, (uptr)res, sz);
775 return res;
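// Note on mmap_interceptor() above: a successfully mapped range is made to
// look as if the calling thread had just written it (MemoryRangeImitateWrite),
// so later unsynchronized accesses from other threads are reported against the
// mmap call; if the thread currently ignores reads and writes, the shadow for
// the range is simply reset instead.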
778 TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
779 SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
  if (sz != 0) {
    // If sz == 0, munmap returns EINVAL and does not unmap any memory.
782 DontNeedShadowFor((uptr)addr, sz);
783 ScopedGlobalProcessor sgp;
784 ctx->metamap.ResetRange(thr->proc(), (uptr)addr, (uptr)sz);
786 int res = REAL(munmap)(addr, sz);
787 return res;
790 #if SANITIZER_LINUX
791 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
792 SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
793 return user_memalign(thr, pc, align, sz);
795 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
796 #else
797 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
798 #endif
800 #if !SANITIZER_MAC
801 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
802 if (UNLIKELY(cur_thread()->in_symbolizer))
803 return InternalAlloc(sz, nullptr, align);
804 SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
805 return user_aligned_alloc(thr, pc, align, sz);
808 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
809 if (UNLIKELY(cur_thread()->in_symbolizer))
810 return InternalAlloc(sz, nullptr, GetPageSizeCached());
811 SCOPED_INTERCEPTOR_RAW(valloc, sz);
812 return user_valloc(thr, pc, sz);
814 #endif
816 #if SANITIZER_LINUX
817 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
818 if (UNLIKELY(cur_thread()->in_symbolizer)) {
819 uptr PageSize = GetPageSizeCached();
820 sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
821 return InternalAlloc(sz, nullptr, PageSize);
823 SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
824 return user_pvalloc(thr, pc, sz);
826 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
827 #else
828 #define TSAN_MAYBE_INTERCEPT_PVALLOC
829 #endif
831 #if !SANITIZER_MAC
832 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
833 if (UNLIKELY(cur_thread()->in_symbolizer)) {
834 void *p = InternalAlloc(sz, nullptr, align);
835 if (!p)
836 return errno_ENOMEM;
837 *memptr = p;
838 return 0;
840 SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
841 return user_posix_memalign(thr, pc, memptr, align, sz);
843 #endif
// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are deliberately defined as weak functions (so that they don't
// cause link errors when the user defines them as well), so they silently
// auto-disable themselves when such a symbol is already present in the binary.
// If we link libstdc++ statically, it will bring its own __cxa_guard_acquire,
// which will silently replace our interceptor. That's why on Linux we simply
// export these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support static linking, so we just use a regular
// interceptor.
855 #if SANITIZER_MAC
856 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
857 #else
858 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
859 extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
860 #endif
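// The guard object below is driven through three states:
//   0       - initialization has not started;
//   1 << 16 - initialization is in progress (set by __cxa_guard_acquire);
//   1       - initialization is done (set by __cxa_guard_release).
// Waiters spin in __cxa_guard_acquire until the owner either releases the
// guard (they then Acquire() on it, establishing happens-before with the
// initialization) or aborts, which resets the guard to 0 so another thread
// can retry.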
862 // Used in thread-safe function static initialization.
863 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
864 SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
865 for (;;) {
866 u32 cmp = atomic_load(g, memory_order_acquire);
867 if (cmp == 0) {
868 if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
869 return 1;
870 } else if (cmp == 1) {
871 Acquire(thr, pc, (uptr)g);
872 return 0;
873 } else {
874 internal_sched_yield();
879 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
880 SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
881 Release(thr, pc, (uptr)g);
882 atomic_store(g, 1, memory_order_release);
885 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
886 SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
887 atomic_store(g, 0, memory_order_relaxed);
890 namespace __tsan {
891 void DestroyThreadState() {
892 ThreadState *thr = cur_thread();
893 Processor *proc = thr->proc();
894 ThreadFinish(thr);
895 ProcUnwire(proc, thr);
896 ProcDestroy(proc);
897 ThreadSignalContext *sctx = thr->signal_ctx;
898 if (sctx) {
899 thr->signal_ctx = 0;
900 UnmapOrDie(sctx, sizeof(*sctx));
902 DTLS_Destroy();
903 cur_thread_finalize();
905 } // namespace __tsan
907 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
908 static void thread_finalize(void *v) {
909 uptr iter = (uptr)v;
910 if (iter > 1) {
911 if (pthread_setspecific(interceptor_ctx()->finalize_key,
912 (void*)(iter - 1))) {
913 Printf("ThreadSanitizer: failed to set thread key\n");
914 Die();
916 return;
918 DestroyThreadState();
920 #endif
923 struct ThreadParam {
924 void* (*callback)(void *arg);
925 void *param;
926 atomic_uintptr_t tid;
929 extern "C" void *__tsan_thread_start_func(void *arg) {
930 ThreadParam *p = (ThreadParam*)arg;
931 void* (*callback)(void *arg) = p->callback;
932 void *param = p->param;
933 int tid = 0;
935 ThreadState *thr = cur_thread();
936 // Thread-local state is not initialized yet.
937 ScopedIgnoreInterceptors ignore;
938 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
939 ThreadIgnoreBegin(thr, 0);
940 if (pthread_setspecific(interceptor_ctx()->finalize_key,
941 (void *)GetPthreadDestructorIterations())) {
942 Printf("ThreadSanitizer: failed to set thread key\n");
943 Die();
945 ThreadIgnoreEnd(thr, 0);
946 #endif
947 while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
948 internal_sched_yield();
949 Processor *proc = ProcCreate();
950 ProcWire(proc, thr);
951 ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
952 atomic_store(&p->tid, 0, memory_order_release);
954 void *res = callback(param);
  // Prevent the callback from being tail-called;
  // it mixes up stack traces.
957 volatile int foo = 42;
958 foo++;
959 return res;
962 TSAN_INTERCEPTOR(int, pthread_create,
963 void *th, void *attr, void *(*callback)(void*), void * param) {
964 SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
966 MaybeSpawnBackgroundThread();
968 if (ctx->after_multithreaded_fork) {
969 if (flags()->die_after_fork) {
970 Report("ThreadSanitizer: starting new threads after multi-threaded "
971 "fork is not supported. Dying (set die_after_fork=0 to override)\n");
972 Die();
973 } else {
974 VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
975 "fork is not supported (pid %d). Continuing because of "
976 "die_after_fork=0, but you are on your own\n", internal_getpid());
979 __sanitizer_pthread_attr_t myattr;
980 if (attr == 0) {
981 pthread_attr_init(&myattr);
982 attr = &myattr;
984 int detached = 0;
985 REAL(pthread_attr_getdetachstate)(attr, &detached);
986 AdjustStackSize(attr);
988 ThreadParam p;
989 p.callback = callback;
990 p.param = param;
991 atomic_store(&p.tid, 0, memory_order_relaxed);
992 int res = -1;
994 // Otherwise we see false positives in pthread stack manipulation.
995 ScopedIgnoreInterceptors ignore;
996 ThreadIgnoreBegin(thr, pc);
997 res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
998 ThreadIgnoreEnd(thr, pc);
1000 if (res == 0) {
1001 int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
1002 CHECK_NE(tid, 0);
1003 // Synchronization on p.tid serves two purposes:
1004 // 1. ThreadCreate must finish before the new thread starts.
1005 // Otherwise the new thread can call pthread_detach, but the pthread_t
1006 // identifier is not yet registered in ThreadRegistry by ThreadCreate.
1007 // 2. ThreadStart must finish before this thread continues.
1008 // Otherwise, this thread can call pthread_detach and reset thr->sync
1009 // before the new thread got a chance to acquire from it in ThreadStart.
1010 atomic_store(&p.tid, tid, memory_order_release);
1011 while (atomic_load(&p.tid, memory_order_acquire) != 0)
1012 internal_sched_yield();
1014 if (attr == &myattr)
1015 pthread_attr_destroy(&myattr);
1016 return res;
1019 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1020 SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
1021 int tid = ThreadTid(thr, pc, (uptr)th);
1022 ThreadIgnoreBegin(thr, pc);
1023 int res = BLOCK_REAL(pthread_join)(th, ret);
1024 ThreadIgnoreEnd(thr, pc);
1025 if (res == 0) {
1026 ThreadJoin(thr, pc, tid);
1028 return res;
1031 DEFINE_REAL_PTHREAD_FUNCTIONS
1033 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1034 SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
1035 int tid = ThreadTid(thr, pc, (uptr)th);
1036 int res = REAL(pthread_detach)(th);
1037 if (res == 0) {
1038 ThreadDetach(thr, pc, tid);
1040 return res;
// Problem:
// The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2),
// and pthread_cond_t has a different size in each version.
// If we call the new REAL functions on an old pthread_cond_t, they will corrupt
// memory after the pthread_cond_t (the old cond is smaller).
// If we call the old REAL functions on a new pthread_cond_t, we lose some
// functionality (e.g. the old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require 2 versions of the interceptors as well.
// But this is messy; in particular, it requires linker scripts when the
// sanitizer runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against the old
// pthread (2.2.5 dates back to 2002) and provide the legacy_pthread_cond flag
// that allows working with old libraries (but this mode does not support
// some features, e.g. pthread_condattr_getpshared).
static void *init_cond(void *c, bool force = false) {
  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
  // So we allocate additional memory on the side, large enough to hold
  // any pthread_cond_t object, and always call the new REAL functions, but
  // pass the aux object to them.
  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes the
  // first word of pthread_cond_t to zero.
  // All of this is relevant only on Linux.
1066 if (!common_flags()->legacy_pthread_cond)
1067 return c;
1068 atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1069 uptr cond = atomic_load(p, memory_order_acquire);
1070 if (!force && cond != 0)
1071 return (void*)cond;
1072 void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1073 internal_memset(newcond, 0, pthread_cond_t_sz);
1074 if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1075 memory_order_acq_rel))
1076 return newcond;
1077 WRAP(free)(newcond);
1078 return (void*)cond;
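// In legacy_pthread_cond mode the first word of the user's pthread_cond_t is
// used only as an atomic pointer to a heap-side aux object of
// pthread_cond_t_sz bytes; the interceptors below always pass that aux object,
// never the user object, to the REAL pthread_cond_* functions, and
// pthread_cond_destroy frees it again.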
struct CondMutexUnlockCtx {
  ScopedInterceptor *si;
  ThreadState *thr;
  uptr pc;
  void *m;
};

static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
  // The pthread_cond_wait interceptor has enabled async signal delivery
  // (see BlockingCall below). Disable async signals since we are running
  // tsan code. Also, the ScopedInterceptor and BlockingCall destructors won't
  // run because the thread is cancelled, so we have to execute them manually
  // (the thread can still run some user code due to pthread_cleanup_push).
1094 ThreadSignalContext *ctx = SigCtx(arg->thr);
1095 CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
1096 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
1097 MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
1098 // Undo BlockingCall ctor effects.
1099 arg->thr->ignore_interceptors--;
1100 arg->si->~ScopedInterceptor();
1103 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1104 void *cond = init_cond(c, true);
1105 SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1106 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1107 return REAL(pthread_cond_init)(cond, a);
1110 static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
1111 int (*fn)(void *c, void *m, void *abstime), void *c,
1112 void *m, void *t) {
1113 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1114 MutexUnlock(thr, pc, (uptr)m);
1115 CondMutexUnlockCtx arg = {si, thr, pc, m};
1116 int res = 0;
1117 // This ensures that we handle mutex lock even in case of pthread_cancel.
1118 // See test/tsan/cond_cancel.cc.
1120 // Enable signal delivery while the thread is blocked.
1121 BlockingCall bc(thr);
1122 res = call_pthread_cancel_with_cleanup(
1123 fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
1125 if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1126 MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1127 return res;
1130 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1131 void *cond = init_cond(c);
1132 SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1133 return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
1134 pthread_cond_wait),
1135 cond, m, 0);
1138 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1139 void *cond = init_cond(c);
1140 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1141 return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
1142 abstime);
1145 #if SANITIZER_MAC
1146 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1147 void *reltime) {
1148 void *cond = init_cond(c);
1149 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1150 return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
1151 m, reltime);
1153 #endif
1155 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1156 void *cond = init_cond(c);
1157 SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1158 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1159 return REAL(pthread_cond_signal)(cond);
1162 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1163 void *cond = init_cond(c);
1164 SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1165 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1166 return REAL(pthread_cond_broadcast)(cond);
1169 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1170 void *cond = init_cond(c);
1171 SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1172 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1173 int res = REAL(pthread_cond_destroy)(cond);
1174 if (common_flags()->legacy_pthread_cond) {
1175 // Free our aux cond and zero the pointer to not leave dangling pointers.
1176 WRAP(free)(cond);
1177 atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1179 return res;
1182 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1183 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1184 int res = REAL(pthread_mutex_init)(m, a);
1185 if (res == 0) {
1186 u32 flagz = 0;
1187 if (a) {
1188 int type = 0;
1189 if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1190 if (type == PTHREAD_MUTEX_RECURSIVE ||
1191 type == PTHREAD_MUTEX_RECURSIVE_NP)
1192 flagz |= MutexFlagWriteReentrant;
1194 MutexCreate(thr, pc, (uptr)m, flagz);
1196 return res;
1199 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1200 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1201 int res = REAL(pthread_mutex_destroy)(m);
1202 if (res == 0 || res == errno_EBUSY) {
1203 MutexDestroy(thr, pc, (uptr)m);
1205 return res;
1208 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1209 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1210 int res = REAL(pthread_mutex_trylock)(m);
1211 if (res == errno_EOWNERDEAD)
1212 MutexRepair(thr, pc, (uptr)m);
1213 if (res == 0 || res == errno_EOWNERDEAD)
1214 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1215 return res;
1218 #if !SANITIZER_MAC
1219 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1220 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1221 int res = REAL(pthread_mutex_timedlock)(m, abstime);
1222 if (res == 0) {
1223 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1225 return res;
1227 #endif
1229 #if !SANITIZER_MAC
1230 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1231 SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1232 int res = REAL(pthread_spin_init)(m, pshared);
1233 if (res == 0) {
1234 MutexCreate(thr, pc, (uptr)m);
1236 return res;
1239 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1240 SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1241 int res = REAL(pthread_spin_destroy)(m);
1242 if (res == 0) {
1243 MutexDestroy(thr, pc, (uptr)m);
1245 return res;
1248 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1249 SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1250 MutexPreLock(thr, pc, (uptr)m);
1251 int res = REAL(pthread_spin_lock)(m);
1252 if (res == 0) {
1253 MutexPostLock(thr, pc, (uptr)m);
1255 return res;
1258 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1259 SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1260 int res = REAL(pthread_spin_trylock)(m);
1261 if (res == 0) {
1262 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1264 return res;
1267 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1268 SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1269 MutexUnlock(thr, pc, (uptr)m);
1270 int res = REAL(pthread_spin_unlock)(m);
1271 return res;
1273 #endif
1275 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1276 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1277 int res = REAL(pthread_rwlock_init)(m, a);
1278 if (res == 0) {
1279 MutexCreate(thr, pc, (uptr)m);
1281 return res;
1284 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1285 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1286 int res = REAL(pthread_rwlock_destroy)(m);
1287 if (res == 0) {
1288 MutexDestroy(thr, pc, (uptr)m);
1290 return res;
1293 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1294 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1295 MutexPreReadLock(thr, pc, (uptr)m);
1296 int res = REAL(pthread_rwlock_rdlock)(m);
1297 if (res == 0) {
1298 MutexPostReadLock(thr, pc, (uptr)m);
1300 return res;
1303 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1304 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1305 int res = REAL(pthread_rwlock_tryrdlock)(m);
1306 if (res == 0) {
1307 MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
1309 return res;
1312 #if !SANITIZER_MAC
1313 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1314 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1315 int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1316 if (res == 0) {
1317 MutexPostReadLock(thr, pc, (uptr)m);
1319 return res;
1321 #endif
1323 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1324 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1325 MutexPreLock(thr, pc, (uptr)m);
1326 int res = REAL(pthread_rwlock_wrlock)(m);
1327 if (res == 0) {
1328 MutexPostLock(thr, pc, (uptr)m);
1330 return res;
1333 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1334 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1335 int res = REAL(pthread_rwlock_trywrlock)(m);
1336 if (res == 0) {
1337 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1339 return res;
1342 #if !SANITIZER_MAC
1343 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1344 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1345 int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1346 if (res == 0) {
1347 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1349 return res;
1351 #endif
1353 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1354 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1355 MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1356 int res = REAL(pthread_rwlock_unlock)(m);
1357 return res;
1360 #if !SANITIZER_MAC
1361 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1362 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1363 MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
1364 int res = REAL(pthread_barrier_init)(b, a, count);
1365 return res;
1368 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1369 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1370 MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
1371 int res = REAL(pthread_barrier_destroy)(b);
1372 return res;
1375 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1376 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1377 Release(thr, pc, (uptr)b);
1378 MemoryRead(thr, pc, (uptr)b, kSizeLog1);
1379 int res = REAL(pthread_barrier_wait)(b);
1380 MemoryRead(thr, pc, (uptr)b, kSizeLog1);
1381 if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1382 Acquire(thr, pc, (uptr)b);
1384 return res;
1386 #endif
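// The barrier interceptor above encodes the barrier's happens-before edges
// directly: each waiter does Release(b) before entering the real wait and
// Acquire(b) after leaving it, so everything done before the barrier
// happens-before everything done after it in every participating thread. The
// MemoryRead/MemoryWrite calls additionally catch direct races on the
// pthread_barrier_t object itself (e.g. with pthread_barrier_destroy).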
1388 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1389 SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1390 if (o == 0 || f == 0)
1391 return errno_EINVAL;
1392 atomic_uint32_t *a;
1394 if (SANITIZER_MAC)
1395 a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1396 else if (SANITIZER_NETBSD)
1397 a = static_cast<atomic_uint32_t*>
1398 ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1399 else
1400 a = static_cast<atomic_uint32_t*>(o);
1402 u32 v = atomic_load(a, memory_order_acquire);
1403 if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
1404 memory_order_relaxed)) {
1405 (*f)();
1406 if (!thr->in_ignored_lib)
1407 Release(thr, pc, (uptr)o);
1408 atomic_store(a, 2, memory_order_release);
1409 } else {
1410 while (v != 2) {
1411 internal_sched_yield();
1412 v = atomic_load(a, memory_order_acquire);
1414 if (!thr->in_ignored_lib)
1415 Acquire(thr, pc, (uptr)o);
1417 return 0;
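// pthread_once above is driven by a small state machine stored in (part of)
// the pthread_once_t:
//   0 -> 1 : this thread won the CAS, runs f(), does Release(o) and stores 2;
//   1      : another thread is running f(); spin until the state becomes 2;
//   2      : f() has completed; Acquire(o) establishes happens-before with
//            the initializing thread.
// The Release/Acquire pair is skipped while inside an ignored library
// (thr->in_ignored_lib).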
1420 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1421 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1422 SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1423 if (fd > 0)
1424 FdAccess(thr, pc, fd);
1425 return REAL(__fxstat)(version, fd, buf);
1427 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
1428 #else
1429 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1430 #endif
1432 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1433 #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
1434 SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1435 if (fd > 0)
1436 FdAccess(thr, pc, fd);
1437 return REAL(fstat)(fd, buf);
1438 #else
1439 SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
1440 if (fd > 0)
1441 FdAccess(thr, pc, fd);
1442 return REAL(__fxstat)(0, fd, buf);
1443 #endif
1446 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1447 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1448 SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1449 if (fd > 0)
1450 FdAccess(thr, pc, fd);
1451 return REAL(__fxstat64)(version, fd, buf);
1453 #define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
1454 #else
1455 #define TSAN_MAYBE_INTERCEPT___FXSTAT64
1456 #endif
1458 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1459 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1460 SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
1461 if (fd > 0)
1462 FdAccess(thr, pc, fd);
1463 return REAL(__fxstat64)(0, fd, buf);
1465 #define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1466 #else
1467 #define TSAN_MAYBE_INTERCEPT_FSTAT64
1468 #endif
1470 TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
1471 SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
1472 READ_STRING(thr, pc, name, 0);
1473 int fd = REAL(open)(name, flags, mode);
1474 if (fd >= 0)
1475 FdFileCreate(thr, pc, fd);
1476 return fd;
1479 #if SANITIZER_LINUX
1480 TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
1481 SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
1482 READ_STRING(thr, pc, name, 0);
1483 int fd = REAL(open64)(name, flags, mode);
1484 if (fd >= 0)
1485 FdFileCreate(thr, pc, fd);
1486 return fd;
1488 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1489 #else
1490 #define TSAN_MAYBE_INTERCEPT_OPEN64
1491 #endif
1493 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1494 SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1495 READ_STRING(thr, pc, name, 0);
1496 int fd = REAL(creat)(name, mode);
1497 if (fd >= 0)
1498 FdFileCreate(thr, pc, fd);
1499 return fd;
1502 #if SANITIZER_LINUX
1503 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1504 SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1505 READ_STRING(thr, pc, name, 0);
1506 int fd = REAL(creat64)(name, mode);
1507 if (fd >= 0)
1508 FdFileCreate(thr, pc, fd);
1509 return fd;
1511 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1512 #else
1513 #define TSAN_MAYBE_INTERCEPT_CREAT64
1514 #endif
1516 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1517 SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1518 int newfd = REAL(dup)(oldfd);
1519 if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1520 FdDup(thr, pc, oldfd, newfd, true);
1521 return newfd;
1524 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1525 SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1526 int newfd2 = REAL(dup2)(oldfd, newfd);
1527 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1528 FdDup(thr, pc, oldfd, newfd2, false);
1529 return newfd2;
1532 #if !SANITIZER_MAC
1533 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1534 SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1535 int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1536 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1537 FdDup(thr, pc, oldfd, newfd2, false);
1538 return newfd2;
1540 #endif
1542 #if SANITIZER_LINUX
1543 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1544 SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1545 int fd = REAL(eventfd)(initval, flags);
1546 if (fd >= 0)
1547 FdEventCreate(thr, pc, fd);
1548 return fd;
1550 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1551 #else
1552 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1553 #endif
1555 #if SANITIZER_LINUX
1556 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1557 SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
1558 if (fd >= 0)
1559 FdClose(thr, pc, fd);
1560 fd = REAL(signalfd)(fd, mask, flags);
1561 if (fd >= 0)
1562 FdSignalCreate(thr, pc, fd);
1563 return fd;
1565 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1566 #else
1567 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1568 #endif
1570 #if SANITIZER_LINUX
1571 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1572 SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1573 int fd = REAL(inotify_init)(fake);
1574 if (fd >= 0)
1575 FdInotifyCreate(thr, pc, fd);
1576 return fd;
1578 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1579 #else
1580 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1581 #endif
1583 #if SANITIZER_LINUX
1584 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1585 SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1586 int fd = REAL(inotify_init1)(flags);
1587 if (fd >= 0)
1588 FdInotifyCreate(thr, pc, fd);
1589 return fd;
1591 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1592 #else
1593 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1594 #endif
1596 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1597 SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1598 int fd = REAL(socket)(domain, type, protocol);
1599 if (fd >= 0)
1600 FdSocketCreate(thr, pc, fd);
1601 return fd;
1604 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1605 SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1606 int res = REAL(socketpair)(domain, type, protocol, fd);
1607 if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1608 FdPipeCreate(thr, pc, fd[0], fd[1]);
1609 return res;
1612 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1613 SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1614 FdSocketConnecting(thr, pc, fd);
1615 int res = REAL(connect)(fd, addr, addrlen);
1616 if (res == 0 && fd >= 0)
1617 FdSocketConnect(thr, pc, fd);
1618 return res;
1621 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1622 SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1623 int res = REAL(bind)(fd, addr, addrlen);
1624 if (fd > 0 && res == 0)
1625 FdAccess(thr, pc, fd);
1626 return res;
1629 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1630 SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1631 int res = REAL(listen)(fd, backlog);
1632 if (fd > 0 && res == 0)
1633 FdAccess(thr, pc, fd);
1634 return res;
1637 TSAN_INTERCEPTOR(int, close, int fd) {
1638 SCOPED_TSAN_INTERCEPTOR(close, fd);
1639 if (fd >= 0)
1640 FdClose(thr, pc, fd);
1641 return REAL(close)(fd);
1644 #if SANITIZER_LINUX
1645 TSAN_INTERCEPTOR(int, __close, int fd) {
1646 SCOPED_TSAN_INTERCEPTOR(__close, fd);
1647 if (fd >= 0)
1648 FdClose(thr, pc, fd);
1649 return REAL(__close)(fd);
1651 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1652 #else
1653 #define TSAN_MAYBE_INTERCEPT___CLOSE
1654 #endif
1656 // glibc guts
1657 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1658 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1659 SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
1660 int fds[64];
1661 int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1662 for (int i = 0; i < cnt; i++) {
1663 if (fds[i] > 0)
1664 FdClose(thr, pc, fds[i]);
1666 REAL(__res_iclose)(state, free_addr);
1668 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1669 #else
1670 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1671 #endif
1673 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1674 SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1675 int res = REAL(pipe)(pipefd);
1676 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1677 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1678 return res;
1681 #if !SANITIZER_MAC
1682 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1683 SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1684 int res = REAL(pipe2)(pipefd, flags);
1685 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1686 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1687 return res;
1689 #endif
1691 TSAN_INTERCEPTOR(int, unlink, char *path) {
1692 SCOPED_TSAN_INTERCEPTOR(unlink, path);
1693 Release(thr, pc, File2addr(path));
1694 int res = REAL(unlink)(path);
1695 return res;
1698 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1699 SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1700 void *res = REAL(tmpfile)(fake);
1701 if (res) {
1702 int fd = fileno_unlocked(res);
1703 if (fd >= 0)
1704 FdFileCreate(thr, pc, fd);
1706 return res;
1709 #if SANITIZER_LINUX
1710 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1711 SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1712 void *res = REAL(tmpfile64)(fake);
1713 if (res) {
1714 int fd = fileno_unlocked(res);
1715 if (fd >= 0)
1716 FdFileCreate(thr, pc, fd);
1718 return res;
1720 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1721 #else
1722 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1723 #endif
1725 static void FlushStreams() {
1726 // Flushing all the streams here may freeze the process if a child thread is
1727 // performing file stream operations at the same time.
1728 REAL(fflush)(stdout);
1729 REAL(fflush)(stderr);
1732 TSAN_INTERCEPTOR(void, abort, int fake) {
1733 SCOPED_TSAN_INTERCEPTOR(abort, fake);
1734 FlushStreams();
1735 REAL(abort)(fake);
1738 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1739 SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1740 Release(thr, pc, Dir2addr(path));
1741 int res = REAL(rmdir)(path);
1742 return res;
1745 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1746 SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
1747 if (dirp) {
1748 int fd = dirfd(dirp);
1749 FdClose(thr, pc, fd);
1751 return REAL(closedir)(dirp);
1754 #if SANITIZER_LINUX
1755 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1756 SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1757 int fd = REAL(epoll_create)(size);
1758 if (fd >= 0)
1759 FdPollCreate(thr, pc, fd);
1760 return fd;
1763 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1764 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1765 int fd = REAL(epoll_create1)(flags);
1766 if (fd >= 0)
1767 FdPollCreate(thr, pc, fd);
1768 return fd;
1771 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1772 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1773 if (epfd >= 0)
1774 FdAccess(thr, pc, epfd);
1775 if (epfd >= 0 && fd >= 0)
1776 FdAccess(thr, pc, fd);
1777 if (op == EPOLL_CTL_ADD && epfd >= 0)
1778 FdRelease(thr, pc, epfd);
1779 int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1780 return res;
1783 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1784 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1785 if (epfd >= 0)
1786 FdAccess(thr, pc, epfd);
1787 int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1788 if (res > 0 && epfd >= 0)
1789 FdAcquire(thr, pc, epfd);
1790 return res;
1793 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1794 void *sigmask) {
1795 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1796 if (epfd >= 0)
1797 FdAccess(thr, pc, epfd);
1798 int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1799 if (res > 0 && epfd >= 0)
1800 FdAcquire(thr, pc, epfd);
1801 return res;
1804 #define TSAN_MAYBE_INTERCEPT_EPOLL \
1805 TSAN_INTERCEPT(epoll_create); \
1806 TSAN_INTERCEPT(epoll_create1); \
1807 TSAN_INTERCEPT(epoll_ctl); \
1808 TSAN_INTERCEPT(epoll_wait); \
1809 TSAN_INTERCEPT(epoll_pwait)
1810 #else
1811 #define TSAN_MAYBE_INTERCEPT_EPOLL
1812 #endif
1814 // The following functions are intercepted merely to process pending signals.
1815 // If the program blocks signal X, we must deliver the signal before the function
1816 // returns. Similarly, if the program unblocks a signal (or returns from sigsuspend),
1817 // it's better to deliver the signal straight away.
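// A minimal sketch of the effect (hypothetical user code, not part of tsan):
// a SIGUSR1 that arrived while the program was inside another interceptor may
// have been deferred by rtl_generic_sighandler below; calling one of these
// intercepted functions lets the deferred handler run before control returns
// to the caller.
//   sigset_t all; sigfillset(&all);
//   pthread_sigmask(SIG_UNBLOCK, &all, nullptr);  // deferred handler runs as
//                                                 // part of this call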
1818 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
1819 SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
1820 return REAL(sigsuspend)(mask);
1823 TSAN_INTERCEPTOR(int, sigblock, int mask) {
1824 SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
1825 return REAL(sigblock)(mask);
1828 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
1829 SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
1830 return REAL(sigsetmask)(mask);
1833 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
1834 __sanitizer_sigset_t *oldset) {
1835 SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
1836 return REAL(pthread_sigmask)(how, set, oldset);
1839 namespace __tsan {
1841 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
1842 bool sigact, int sig,
1843 __sanitizer_siginfo *info, void *uctx) {
1844 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
1845 if (acquire)
1846 Acquire(thr, 0, (uptr)&sigactions[sig]);
1847 // Signals are generally asynchronous, so if we receive a signal while
1848 // ignores are enabled, we should disable them. This is critical for sync
1849 // and interceptors, because otherwise we can miss synchronization and report
1850 // false races.
1851 int ignore_reads_and_writes = thr->ignore_reads_and_writes;
1852 int ignore_interceptors = thr->ignore_interceptors;
1853 int ignore_sync = thr->ignore_sync;
1854 if (!ctx->after_multithreaded_fork) {
1855 thr->ignore_reads_and_writes = 0;
1856 thr->fast_state.ClearIgnoreBit();
1857 thr->ignore_interceptors = 0;
1858 thr->ignore_sync = 0;
1860 // Ensure that the handler does not spoil errno.
1861 const int saved_errno = errno;
1862 errno = 99;
1863 // This code races with sigaction. Be careful to not read sa_sigaction twice.
1864 // Also need to remember pc for reporting before the call,
1865 // because the handler can reset it.
1866 volatile uptr pc =
1867 sigact ? (uptr)sigactions[sig].sigaction : (uptr)sigactions[sig].handler;
1868 if (pc != sig_dfl && pc != sig_ign) {
1869 if (sigact)
1870 ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
1871 else
1872 ((__sanitizer_sighandler_ptr)pc)(sig);
1874 if (!ctx->after_multithreaded_fork) {
1875 thr->ignore_reads_and_writes = ignore_reads_and_writes;
1876 if (ignore_reads_and_writes)
1877 thr->fast_state.SetIgnoreBit();
1878 thr->ignore_interceptors = ignore_interceptors;
1879 thr->ignore_sync = ignore_sync;
1881 // We do not detect errno spoiling for SIGTERM,
1882 // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
1883 // and tsan would report a false positive in such cases.
1884 // It's difficult to properly detect this situation (the reraise),
1885 // because in the async signal processing case (when the handler is called
1886 // directly from rtl_generic_sighandler) we have not yet received the reraised
1887 // signal; and it looks too fragile to intercept all ways to reraise a signal.
1888 if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
1889 VarSizeStackTrace stack;
1890 // StackTrace::GetNextInstructionPc(pc) is used because a return address is
1891 // expected; OutputReport() will undo this.
1892 ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
1893 ThreadRegistryLock l(ctx->thread_registry);
1894 ScopedReport rep(ReportTypeErrnoInSignal);
1895 if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
1896 rep.AddStack(stack, true);
1897 OutputReport(thr, rep);
1900 errno = saved_errno;
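// A minimal sketch of what the check above catches (hypothetical user code,
// not part of tsan): an async handler that clobbers errno without saving and
// restoring it can corrupt the errno value observed by the interrupted code,
// and is reported as ReportTypeErrnoInSignal.
//   void handler(int sig) {
//     close(some_fd);  // may overwrite errno; no save/restore around it
//   }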
1903 void ProcessPendingSignals(ThreadState *thr) {
1904 ThreadSignalContext *sctx = SigCtx(thr);
1905 if (sctx == 0 ||
1906 atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
1907 return;
1908 atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
1909 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
1910 internal_sigfillset(&sctx->emptyset);
1911 int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
1912 CHECK_EQ(res, 0);
1913 for (int sig = 0; sig < kSigCount; sig++) {
1914 SignalDesc *signal = &sctx->pending_signals[sig];
1915 if (signal->armed) {
1916 signal->armed = false;
1917 CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
1918 &signal->siginfo, &signal->ctx);
1921 res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
1922 CHECK_EQ(res, 0);
1923 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
1926 } // namespace __tsan
1928 static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
1929 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
1930 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
1931 // If we are sending a signal to ourselves, we must process it now.
1932 (sctx && sig == sctx->int_signal_send);
1935 void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
1936 __sanitizer_siginfo *info,
1937 void *ctx) {
1938 ThreadState *thr = cur_thread();
1939 ThreadSignalContext *sctx = SigCtx(thr);
1940 if (sig < 0 || sig >= kSigCount) {
1941 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
1942 return;
1944 // Don't mess with synchronous signals.
1945 const bool sync = is_sync_signal(sctx, sig);
1946 if (sync ||
1947 // If we are in a blocking function, we can safely process it now
1948 // (but check whether we are in a recursive interceptor,
1949 // e.g. pthread_join()->munmap()).
1950 (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
1951 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
1952 if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
1953 atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
1954 CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
1955 atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
1956 } else {
1957 // Be very conservative about when we do acquire in this case.
1958 // It's unsafe to do acquire in async handlers, because ThreadState
1959 // can be in an inconsistent state.
1960 // SIGSYS looks relatively safe -- it's synchronous and can actually
1961 // need some global state.
1962 bool acq = (sig == SIGSYS);
1963 CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
1965 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
1966 return;
1969 if (sctx == 0)
1970 return;
1971 SignalDesc *signal = &sctx->pending_signals[sig];
1972 if (signal->armed == false) {
1973 signal->armed = true;
1974 signal->sigaction = sigact;
1975 if (info)
1976 internal_memcpy(&signal->siginfo, info, sizeof(*info));
1977 if (ctx)
1978 internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
1979 atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
1983 static void rtl_sighandler(int sig) {
1984 rtl_generic_sighandler(false, sig, 0, 0);
1987 static void rtl_sigaction(int sig, __sanitizer_siginfo *info, void *ctx) {
1988 rtl_generic_sighandler(true, sig, info, ctx);
1991 TSAN_INTERCEPTOR(int, raise, int sig) {
1992 SCOPED_TSAN_INTERCEPTOR(raise, sig);
1993 ThreadSignalContext *sctx = SigCtx(thr);
1994 CHECK_NE(sctx, 0);
1995 int prev = sctx->int_signal_send;
1996 sctx->int_signal_send = sig;
1997 int res = REAL(raise)(sig);
1998 CHECK_EQ(sctx->int_signal_send, sig);
1999 sctx->int_signal_send = prev;
2000 return res;
2003 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2004 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2005 ThreadSignalContext *sctx = SigCtx(thr);
2006 CHECK_NE(sctx, 0);
2007 int prev = sctx->int_signal_send;
2008 if (pid == (int)internal_getpid()) {
2009 sctx->int_signal_send = sig;
2011 int res = REAL(kill)(pid, sig);
2012 if (pid == (int)internal_getpid()) {
2013 CHECK_EQ(sctx->int_signal_send, sig);
2014 sctx->int_signal_send = prev;
2016 return res;
2019 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2020 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2021 ThreadSignalContext *sctx = SigCtx(thr);
2022 CHECK_NE(sctx, 0);
2023 int prev = sctx->int_signal_send;
2024 if (tid == pthread_self()) {
2025 sctx->int_signal_send = sig;
2027 int res = REAL(pthread_kill)(tid, sig);
2028 if (tid == pthread_self()) {
2029 CHECK_EQ(sctx->int_signal_send, sig);
2030 sctx->int_signal_send = prev;
2032 return res;
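// A minimal sketch of why int_signal_send is set around the calls above
// (hypothetical user code, not part of tsan): a self-sent signal is treated as
// synchronous by is_sync_signal(), so its handler is invoked immediately in
// rtl_generic_sighandler instead of being queued as a pending signal.
//   signal(SIGUSR1, handler);   // handler is a hypothetical user function
//   raise(SIGUSR1);             // handler() has run by the time raise() returns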
2035 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2036 SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2037 // It's intercepted merely to process pending signals.
2038 return REAL(gettimeofday)(tv, tz);
2041 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2042 void *hints, void *rv) {
2043 SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2044 // We miss atomic synchronization in getaddrinfo,
2045 // and can report a false race between malloc and free
2046 // inside of getaddrinfo. So ignore memory accesses.
2047 ThreadIgnoreBegin(thr, pc);
2048 int res = REAL(getaddrinfo)(node, service, hints, rv);
2049 ThreadIgnoreEnd(thr, pc);
2050 return res;
2053 TSAN_INTERCEPTOR(int, fork, int fake) {
2054 if (UNLIKELY(cur_thread()->in_symbolizer))
2055 return REAL(fork)(fake);
2056 SCOPED_INTERCEPTOR_RAW(fork, fake);
2057 ForkBefore(thr, pc);
2058 int pid;
2060 // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
2061 // we'll assert in CheckNoLocks() unless we ignore interceptors.
2062 ScopedIgnoreInterceptors ignore;
2063 pid = REAL(fork)(fake);
2065 if (pid == 0) {
2066 // child
2067 ForkChildAfter(thr, pc);
2068 FdOnFork(thr, pc);
2069 } else if (pid > 0) {
2070 // parent
2071 ForkParentAfter(thr, pc);
2072 } else {
2073 // error
2074 ForkParentAfter(thr, pc);
2076 return pid;
2079 TSAN_INTERCEPTOR(int, vfork, int fake) {
2080 // Some programs (e.g. openjdk) call close for all file descriptors
2081 // in the child process. Under tsan it leads to false positives, because
2082 // address space is shared, so the parent process also thinks that
2083 // the descriptors are closed (while they are actually not).
2084 // This leads to false positives due to missed synchronization.
2085 // Strictly speaking, this is undefined behavior, because the vfork child is
2086 // not allowed to call any functions other than exec/exit. But this is what
2087 // openjdk does, so we want to handle it.
2088 // We could disable interceptors in the child process. But it's not possible
2089 // to simply intercept and wrap vfork, because vfork child is not allowed
2090 // to return from the function that calls vfork, and that's exactly what
2091 // we would do. So this would require some assembly trickery as well.
2092 // Instead we simply turn vfork into fork.
2093 return WRAP(fork)(fake);
2096 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2097 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2098 void *data);
2099 struct dl_iterate_phdr_data {
2100 ThreadState *thr;
2101 uptr pc;
2102 dl_iterate_phdr_cb_t cb;
2103 void *data;
2106 static bool IsAppNotRodata(uptr addr) {
2107 return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
2110 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2111 void *data) {
2112 dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2113 // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2114 // accessible in the dl_iterate_phdr callback. But we don't see synchronization
2115 // inside the dynamic linker, so we "unpoison" it here in order not to
2116 // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough,
2117 // because some libc functions call __libc_dlopen.
2118 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2119 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2120 internal_strlen(info->dlpi_name));
2121 int res = cbdata->cb(info, size, cbdata->data);
2122 // Perform the check one more time in case info->dlpi_name was overwritten
2123 // by user callback.
2124 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2125 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2126 internal_strlen(info->dlpi_name));
2127 return res;
2130 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2131 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2132 dl_iterate_phdr_data cbdata;
2133 cbdata.thr = thr;
2134 cbdata.pc = pc;
2135 cbdata.cb = cb;
2136 cbdata.data = data;
2137 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2138 return res;
2140 #endif
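// A minimal sketch of the user side (hypothetical callback, not part of tsan):
// reading info->dlpi_name in the callback does not produce false reports even
// though the dynamic linker reuses that memory, because the wrapper above
// resets its shadow before and after invoking the callback.
//   static int print_module(struct dl_phdr_info *info, size_t size, void *data) {
//     printf("%s\n", info->dlpi_name);  // no race report on dlpi_name
//     return 0;
//   }
//   dl_iterate_phdr(print_module, nullptr);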
2142 static int OnExit(ThreadState *thr) {
2143 int status = Finalize(thr);
2144 FlushStreams();
2145 return status;
2148 struct TsanInterceptorContext {
2149 ThreadState *thr;
2150 const uptr caller_pc;
2151 const uptr pc;
2154 #if !SANITIZER_MAC
2155 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2156 __sanitizer_msghdr *msg) {
2157 int fds[64];
2158 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2159 for (int i = 0; i < cnt; i++)
2160 FdEventCreate(thr, pc, fds[i]);
2162 #endif
2164 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2165 // Causes interceptor recursion (getaddrinfo() and fopen())
2166 #undef SANITIZER_INTERCEPT_GETADDRINFO
2167 // These interceptors do not seem to be strictly necessary for tsan.
2168 // But we see cases where the interceptors consume 70% of execution time.
2169 // Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
2170 // First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
2171 // function "writes to" the buffer. Then, the same memory is "written to"
2172 // twice, first as buf and then as pwbufp (both of them refer to the same
2173 // addresses).
2174 #undef SANITIZER_INTERCEPT_GETPWENT
2175 #undef SANITIZER_INTERCEPT_GETPWENT_R
2176 #undef SANITIZER_INTERCEPT_FGETPWENT
2177 #undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
2178 #undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
2179 // We define our own.
2180 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2181 #define NEED_TLS_GET_ADDR
2182 #endif
2183 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2185 #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
2186 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
2187 INTERCEPT_FUNCTION_VER(name, ver)
2189 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
2190 MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
2191 ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
2192 true)
2194 #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
2195 MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
2196 ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
2197 false)
2199 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
2200 SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
2201 TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
2202 ctx = (void *)&_ctx; \
2203 (void) ctx;
2205 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2206 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
2207 TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
2208 ctx = (void *)&_ctx; \
2209 (void) ctx;
2211 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2212 Acquire(thr, pc, File2addr(path)); \
2213 if (file) { \
2214 int fd = fileno_unlocked(file); \
2215 if (fd >= 0) FdFileCreate(thr, pc, fd); \
2218 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2219 if (file) { \
2220 int fd = fileno_unlocked(file); \
2221 if (fd >= 0) FdClose(thr, pc, fd); \
2224 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2225 libignore()->OnLibraryLoaded(filename)
2227 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2228 libignore()->OnLibraryUnloaded()
2230 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2231 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2233 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2234 Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2236 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2237 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2239 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2240 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2242 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2243 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2245 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2246 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2248 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2249 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2251 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2252 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2254 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2255 __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
2257 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2259 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2260 OnExit(((TsanInterceptorContext *) ctx)->thr)
2262 #define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
2263 MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
2264 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2266 #define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
2267 MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
2268 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2270 #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
2271 MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
2272 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2274 #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
2275 MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
2276 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2278 #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
2279 MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
2280 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2282 #define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
2283 off) \
2284 do { \
2285 return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2286 off); \
2287 } while (false)
2289 #if !SANITIZER_MAC
2290 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2291 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2292 ((TsanInterceptorContext *)ctx)->pc, msg)
2293 #endif
2295 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
2296 if (TsanThread *t = GetCurrentThread()) { \
2297 *begin = t->tls_begin(); \
2298 *end = t->tls_end(); \
2299 } else { \
2300 *begin = *end = 0; \
2303 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2304 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2306 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2307 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2309 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2311 static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2312 __sanitizer_sigaction *old);
2313 static __sanitizer_sighandler_ptr signal_impl(int sig,
2314 __sanitizer_sighandler_ptr h);
2316 #define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2317 { return sigaction_impl(signo, act, oldact); }
2319 #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2320 { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2322 #include "sanitizer_common/sanitizer_signal_interceptors.inc"
2324 int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2325 __sanitizer_sigaction *old) {
2326 // Note: if we call REAL(sigaction) directly for any reason without proxying
2327 // the signal handler through rtl_sigaction, very bad things will happen.
2328 // The handler will run synchronously and corrupt tsan per-thread state.
2329 SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2330 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2331 __sanitizer_sigaction old_stored;
2332 if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2333 __sanitizer_sigaction newact;
2334 if (act) {
2335 // Copy act into sigactions[sig].
2336 // Can't use a struct copy, because the compiler can emit a call to memcpy.
2337 // Can't use internal_memcpy, because it copies byte-by-byte,
2338 // and a signal handler may read the handler concurrently, so it can read
2339 // some bytes from the old value and some bytes from the new value.
2340 // Use volatile to prevent insertion of memcpy.
2341 sigactions[sig].handler =
2342 *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2343 sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2344 internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2345 sizeof(sigactions[sig].sa_mask));
2346 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
2347 sigactions[sig].sa_restorer = act->sa_restorer;
2348 #endif
2349 internal_memcpy(&newact, act, sizeof(newact));
2350 internal_sigfillset(&newact.sa_mask);
2351 if ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl) {
2352 if (newact.sa_flags & SA_SIGINFO)
2353 newact.sigaction = rtl_sigaction;
2354 else
2355 newact.handler = rtl_sighandler;
2357 ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2358 act = &newact;
2360 int res = REAL(sigaction)(sig, act, old);
2361 if (res == 0 && old) {
2362 uptr cb = (uptr)old->sigaction;
2363 if (cb == (uptr)rtl_sigaction || cb == (uptr)rtl_sighandler) {
2364 internal_memcpy(old, &old_stored, sizeof(*old));
2367 return res;
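// A minimal sketch of the observable behavior (hypothetical user code, not
// part of tsan): because the stored handler is restored above whenever the
// kernel-visible one is rtl_sigaction/rtl_sighandler, a query still returns
// the user's own handler rather than tsan's trampoline.
//   struct sigaction old;
//   sigaction(SIGUSR1, nullptr, &old);
//   // old.sa_handler == user_handler (hypothetical), not rtl_sighandler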
2370 static __sanitizer_sighandler_ptr signal_impl(int sig,
2371 __sanitizer_sighandler_ptr h) {
2372 __sanitizer_sigaction act;
2373 act.handler = h;
2374 internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2375 act.sa_flags = 0;
2376 __sanitizer_sigaction old;
2377 int res = sigaction_symname(sig, &act, &old);
2378 if (res) return (__sanitizer_sighandler_ptr)sig_err;
2379 return old.handler;
2382 #define TSAN_SYSCALL() \
2383 ThreadState *thr = cur_thread(); \
2384 if (thr->ignore_interceptors) \
2385 return; \
2386 ScopedSyscall scoped_syscall(thr) \
2387 /**/
2389 struct ScopedSyscall {
2390 ThreadState *thr;
2392 explicit ScopedSyscall(ThreadState *thr)
2393 : thr(thr) {
2394 Initialize(thr);
2397 ~ScopedSyscall() {
2398 ProcessPendingSignals(thr);
2402 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
2403 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2404 TSAN_SYSCALL();
2405 MemoryAccessRange(thr, pc, p, s, write);
2408 static void syscall_acquire(uptr pc, uptr addr) {
2409 TSAN_SYSCALL();
2410 Acquire(thr, pc, addr);
2411 DPrintf("syscall_acquire(%p)\n", addr);
2414 static void syscall_release(uptr pc, uptr addr) {
2415 TSAN_SYSCALL();
2416 DPrintf("syscall_release(%p)\n", addr);
2417 Release(thr, pc, addr);
2420 static void syscall_fd_close(uptr pc, int fd) {
2421 TSAN_SYSCALL();
2422 FdClose(thr, pc, fd);
2425 static USED void syscall_fd_acquire(uptr pc, int fd) {
2426 TSAN_SYSCALL();
2427 FdAcquire(thr, pc, fd);
2428 DPrintf("syscall_fd_acquire(%p)\n", fd);
2431 static USED void syscall_fd_release(uptr pc, int fd) {
2432 TSAN_SYSCALL();
2433 DPrintf("syscall_fd_release(%p)\n", fd);
2434 FdRelease(thr, pc, fd);
2437 static void syscall_pre_fork(uptr pc) {
2438 TSAN_SYSCALL();
2439 ForkBefore(thr, pc);
2442 static void syscall_post_fork(uptr pc, int pid) {
2443 TSAN_SYSCALL();
2444 if (pid == 0) {
2445 // child
2446 ForkChildAfter(thr, pc);
2447 FdOnFork(thr, pc);
2448 } else if (pid > 0) {
2449 // parent
2450 ForkParentAfter(thr, pc);
2451 } else {
2452 // error
2453 ForkParentAfter(thr, pc);
2456 #endif
2458 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2459 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2461 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2462 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2464 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2465 do { \
2466 (void)(p); \
2467 (void)(s); \
2468 } while (false)
2470 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2471 do { \
2472 (void)(p); \
2473 (void)(s); \
2474 } while (false)
2476 #define COMMON_SYSCALL_ACQUIRE(addr) \
2477 syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2479 #define COMMON_SYSCALL_RELEASE(addr) \
2480 syscall_release(GET_CALLER_PC(), (uptr)(addr))
2482 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2484 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2486 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2488 #define COMMON_SYSCALL_PRE_FORK() \
2489 syscall_pre_fork(GET_CALLER_PC())
2491 #define COMMON_SYSCALL_POST_FORK(res) \
2492 syscall_post_fork(GET_CALLER_PC(), res)
2494 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2495 #include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2497 #ifdef NEED_TLS_GET_ADDR
2498 // Define our own interceptor instead of sanitizer_common's for three reasons:
2499 // 1. It must not process pending signals.
2500 // Signal handlers may contain the MOVDQA instruction (see below).
2501 // 2. It must be as simple as possible so as not to contain MOVDQA itself.
2502 // 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE,
2503 // which is empty for tsan (meant only for msan).
2504 // Note: __tls_get_addr can be called with a mis-aligned stack due to:
2505 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2506 // So the interceptor must work with a mis-aligned stack; in particular, it must
2507 // not execute MOVDQA with stack addresses.
2508 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2509 void *res = REAL(__tls_get_addr)(arg);
2510 ThreadState *thr = cur_thread();
2511 if (!thr)
2512 return res;
2513 DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2514 thr->tls_addr + thr->tls_size);
2515 if (!dtv)
2516 return res;
2517 // New DTLS block has been allocated.
2518 MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2519 return res;
2521 #endif
2523 #if SANITIZER_NETBSD
2524 TSAN_INTERCEPTOR(void, _lwp_exit) {
2525 SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2526 DestroyThreadState();
2527 REAL(_lwp_exit)();
2529 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2530 #else
2531 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2532 #endif
2534 #if SANITIZER_FREEBSD
2535 TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2536 SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2537 DestroyThreadState();
2538 REAL(thr_exit)(state);
2540 #define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2541 #else
2542 #define TSAN_MAYBE_INTERCEPT_THR_EXIT
2543 #endif
2545 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2546 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2547 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2548 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2549 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2550 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2551 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2552 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2553 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2554 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2555 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2556 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2557 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2558 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2559 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2560 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2562 namespace __tsan {
2564 static void finalize(void *arg) {
2565 ThreadState *thr = cur_thread();
2566 int status = Finalize(thr);
2567 // Make sure the output is not lost.
2568 FlushStreams();
2569 if (status)
2570 Die();
2573 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2574 static void unreachable() {
2575 Report("FATAL: ThreadSanitizer: unreachable called\n");
2576 Die();
2578 #endif
2580 void InitializeInterceptors() {
2581 #if !SANITIZER_MAC
2582 // We need to set these up early, because functions like dlsym() can call them.
2583 REAL(memset) = internal_memset;
2584 REAL(memcpy) = internal_memcpy;
2585 #endif
2587 // Instruct libc malloc to consume less memory.
2588 #if SANITIZER_LINUX
2589 mallopt(1, 0); // M_MXFAST
2590 mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
2591 #endif
2593 new(interceptor_ctx()) InterceptorContext();
2595 InitializeCommonInterceptors();
2596 InitializeSignalInterceptors();
2598 #if !SANITIZER_MAC
2599 // We cannot use TSAN_INTERCEPT to get the setjmp address,
2600 // because it takes &setjmp, and setjmp is not present in some versions of libc.
2601 using __interception::GetRealFunctionAddress;
2602 GetRealFunctionAddress(TSAN_STRING_SETJMP,
2603 (uptr*)&REAL(setjmp_symname), 0, 0);
2604 GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
2605 GetRealFunctionAddress(TSAN_STRING_SIGSETJMP,
2606 (uptr*)&REAL(sigsetjmp_symname), 0, 0);
2607 #if !SANITIZER_NETBSD
2608 GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
2609 #endif
2610 #endif
2612 TSAN_INTERCEPT(longjmp_symname);
2613 TSAN_INTERCEPT(siglongjmp_symname);
2614 #if SANITIZER_NETBSD
2615 TSAN_INTERCEPT(_longjmp);
2616 #endif
2618 TSAN_INTERCEPT(malloc);
2619 TSAN_INTERCEPT(__libc_memalign);
2620 TSAN_INTERCEPT(calloc);
2621 TSAN_INTERCEPT(realloc);
2622 TSAN_INTERCEPT(free);
2623 TSAN_INTERCEPT(cfree);
2624 TSAN_INTERCEPT(munmap);
2625 TSAN_MAYBE_INTERCEPT_MEMALIGN;
2626 TSAN_INTERCEPT(valloc);
2627 TSAN_MAYBE_INTERCEPT_PVALLOC;
2628 TSAN_INTERCEPT(posix_memalign);
2630 TSAN_INTERCEPT(strcpy); // NOLINT
2631 TSAN_INTERCEPT(strncpy);
2632 TSAN_INTERCEPT(strdup);
2634 TSAN_INTERCEPT(pthread_create);
2635 TSAN_INTERCEPT(pthread_join);
2636 TSAN_INTERCEPT(pthread_detach);
2638 TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2639 TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2640 TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2641 TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2642 TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2643 TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2645 TSAN_INTERCEPT(pthread_mutex_init);
2646 TSAN_INTERCEPT(pthread_mutex_destroy);
2647 TSAN_INTERCEPT(pthread_mutex_trylock);
2648 TSAN_INTERCEPT(pthread_mutex_timedlock);
2650 TSAN_INTERCEPT(pthread_spin_init);
2651 TSAN_INTERCEPT(pthread_spin_destroy);
2652 TSAN_INTERCEPT(pthread_spin_lock);
2653 TSAN_INTERCEPT(pthread_spin_trylock);
2654 TSAN_INTERCEPT(pthread_spin_unlock);
2656 TSAN_INTERCEPT(pthread_rwlock_init);
2657 TSAN_INTERCEPT(pthread_rwlock_destroy);
2658 TSAN_INTERCEPT(pthread_rwlock_rdlock);
2659 TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2660 TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2661 TSAN_INTERCEPT(pthread_rwlock_wrlock);
2662 TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2663 TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2664 TSAN_INTERCEPT(pthread_rwlock_unlock);
2666 TSAN_INTERCEPT(pthread_barrier_init);
2667 TSAN_INTERCEPT(pthread_barrier_destroy);
2668 TSAN_INTERCEPT(pthread_barrier_wait);
2670 TSAN_INTERCEPT(pthread_once);
2672 TSAN_INTERCEPT(fstat);
2673 TSAN_MAYBE_INTERCEPT___FXSTAT;
2674 TSAN_MAYBE_INTERCEPT_FSTAT64;
2675 TSAN_MAYBE_INTERCEPT___FXSTAT64;
2676 TSAN_INTERCEPT(open);
2677 TSAN_MAYBE_INTERCEPT_OPEN64;
2678 TSAN_INTERCEPT(creat);
2679 TSAN_MAYBE_INTERCEPT_CREAT64;
2680 TSAN_INTERCEPT(dup);
2681 TSAN_INTERCEPT(dup2);
2682 TSAN_INTERCEPT(dup3);
2683 TSAN_MAYBE_INTERCEPT_EVENTFD;
2684 TSAN_MAYBE_INTERCEPT_SIGNALFD;
2685 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2686 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2687 TSAN_INTERCEPT(socket);
2688 TSAN_INTERCEPT(socketpair);
2689 TSAN_INTERCEPT(connect);
2690 TSAN_INTERCEPT(bind);
2691 TSAN_INTERCEPT(listen);
2692 TSAN_MAYBE_INTERCEPT_EPOLL;
2693 TSAN_INTERCEPT(close);
2694 TSAN_MAYBE_INTERCEPT___CLOSE;
2695 TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2696 TSAN_INTERCEPT(pipe);
2697 TSAN_INTERCEPT(pipe2);
2699 TSAN_INTERCEPT(unlink);
2700 TSAN_INTERCEPT(tmpfile);
2701 TSAN_MAYBE_INTERCEPT_TMPFILE64;
2702 TSAN_INTERCEPT(abort);
2703 TSAN_INTERCEPT(rmdir);
2704 TSAN_INTERCEPT(closedir);
2706 TSAN_INTERCEPT(sigsuspend);
2707 TSAN_INTERCEPT(sigblock);
2708 TSAN_INTERCEPT(sigsetmask);
2709 TSAN_INTERCEPT(pthread_sigmask);
2710 TSAN_INTERCEPT(raise);
2711 TSAN_INTERCEPT(kill);
2712 TSAN_INTERCEPT(pthread_kill);
2713 TSAN_INTERCEPT(sleep);
2714 TSAN_INTERCEPT(usleep);
2715 TSAN_INTERCEPT(nanosleep);
2716 TSAN_INTERCEPT(pause);
2717 TSAN_INTERCEPT(gettimeofday);
2718 TSAN_INTERCEPT(getaddrinfo);
2720 TSAN_INTERCEPT(fork);
2721 TSAN_INTERCEPT(vfork);
2722 #if !SANITIZER_ANDROID
2723 TSAN_INTERCEPT(dl_iterate_phdr);
2724 #endif
2725 TSAN_MAYBE_INTERCEPT_ON_EXIT;
2726 TSAN_INTERCEPT(__cxa_atexit);
2727 TSAN_INTERCEPT(_exit);
2729 #ifdef NEED_TLS_GET_ADDR
2730 TSAN_INTERCEPT(__tls_get_addr);
2731 #endif
2733 TSAN_MAYBE_INTERCEPT__LWP_EXIT;
2734 TSAN_MAYBE_INTERCEPT_THR_EXIT;
2736 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2737 // We need to set it up, because interceptors check that the function is resolved.
2738 // But atexit is emitted directly into the module, so it can't be resolved.
2739 REAL(atexit) = (int(*)(void(*)()))unreachable;
2740 #endif
2742 if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
2743 Printf("ThreadSanitizer: failed to setup atexit callback\n");
2744 Die();
2747 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
2748 if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
2749 Printf("ThreadSanitizer: failed to create thread key\n");
2750 Die();
2752 #endif
2754 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
2755 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
2756 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
2757 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
2758 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
2759 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
2760 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
2761 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
2762 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
2763 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
2764 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
2765 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
2766 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
2767 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
2768 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
2769 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
2771 FdInit();
2774 } // namespace __tsan
2776 // Invisible barrier for tests.
2777 // There were several unsuccessful iterations for this functionality:
2778 // 1. Initially it was implemented in user code using
2779 // REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
2780 // MacOS, and futexes are Linux-specific, for that matter.
2781 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
2782 // "as-if synchronized via sleep" messages in reports which failed some
2783 // output tests.
2784 // 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
2785 // visible events, which led to "failed to restore stack trace" failures.
2786 // Note that the no_sanitize_thread attribute does not turn off atomic
2787 // interception, so attaching it to a function defined in user code does not help.
2788 // That's why we now have what we have.
2789 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2790 void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
2791 if (count >= (1 << 8)) {
2792 Printf("barrier_init: count is too large (%d)\n", count);
2793 Die();
2795 // The 8 LSBs hold the thread count; the remaining bits count the threads that have entered.
2796 *barrier = count;
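// A worked example of the encoding above (a sketch): for count == 2 the barrier
// starts at 2; each waiter below adds 1 << 8, so after both threads enter the
// value is 2 + 2 * 256 == 514. The epoch seen by a spinning waiter becomes
// (514 >> 8) / 2 == 1, which differs from the epoch it observed on entry (0),
// so both calls to __tsan_testonly_barrier_wait return.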
2799 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2800 void __tsan_testonly_barrier_wait(u64 *barrier) {
2801 unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
2802 unsigned old_epoch = (old >> 8) / (old & 0xff);
2803 for (;;) {
2804 unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
2805 unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
2806 if (cur_epoch != old_epoch)
2807 return;
2808 internal_sched_yield();