1 //===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
11 // FIXME: move as many interceptors as possible into
12 // sanitizer_common/sanitizer_common_interceptors.inc
13 //===----------------------------------------------------------------------===//
15 #include "sanitizer_common/sanitizer_atomic.h"
16 #include "sanitizer_common/sanitizer_errno.h"
17 #include "sanitizer_common/sanitizer_libc.h"
18 #include "sanitizer_common/sanitizer_linux.h"
19 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
20 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
21 #include "sanitizer_common/sanitizer_placement_new.h"
22 #include "sanitizer_common/sanitizer_posix.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_tls_get_addr.h"
25 #include "interception/interception.h"
26 #include "tsan_interceptors.h"
27 #include "tsan_interface.h"
28 #include "tsan_platform.h"
29 #include "tsan_suppressions.h"
30 #include "tsan_rtl.h"
31 #include "tsan_mman.h"
32 #include "tsan_fd.h"
34 #include <stdarg.h>
36 using namespace __tsan;
38 #if SANITIZER_FREEBSD || SANITIZER_APPLE
39 #define stdout __stdoutp
40 #define stderr __stderrp
41 #endif
43 #if SANITIZER_NETBSD
44 #define dirfd(dirp) (*(int *)(dirp))
45 #define fileno_unlocked(fp) \
46 (((__sanitizer_FILE *)fp)->_file == -1 \
47 ? -1 \
48 : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
50 #define stdout ((__sanitizer_FILE*)&__sF[1])
51 #define stderr ((__sanitizer_FILE*)&__sF[2])
53 #define nanosleep __nanosleep50
54 #define vfork __vfork14
55 #endif
57 #ifdef __mips__
58 const int kSigCount = 129;
59 #else
60 const int kSigCount = 65;
61 #endif
63 #ifdef __mips__
64 struct ucontext_t {
65 u64 opaque[768 / sizeof(u64) + 1];
67 #else
68 struct ucontext_t {
69 // The size is determined by looking at the sizeof of the real ucontext_t on Linux.
70 u64 opaque[936 / sizeof(u64) + 1];
72 #endif
74 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
75 defined(__s390x__)
76 #define PTHREAD_ABI_BASE "GLIBC_2.3.2"
77 #elif defined(__aarch64__) || SANITIZER_PPC64V2
78 #define PTHREAD_ABI_BASE "GLIBC_2.17"
79 #endif
81 extern "C" int pthread_attr_init(void *attr);
82 extern "C" int pthread_attr_destroy(void *attr);
83 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
84 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
85 extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
86 void (*child)(void));
87 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
88 extern "C" int pthread_setspecific(unsigned key, const void *v);
89 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
90 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
91 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
92 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
93 extern "C" int pthread_equal(void *t1, void *t2);
94 extern "C" void *pthread_self();
95 extern "C" void _exit(int status);
96 #if !SANITIZER_NETBSD
97 extern "C" int fileno_unlocked(void *stream);
98 extern "C" int dirfd(void *dirp);
99 #endif
100 #if SANITIZER_NETBSD
101 extern __sanitizer_FILE __sF[];
102 #else
103 extern __sanitizer_FILE *stdout, *stderr;
104 #endif
105 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
106 const int PTHREAD_MUTEX_RECURSIVE = 1;
107 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
108 #else
109 const int PTHREAD_MUTEX_RECURSIVE = 2;
110 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
111 #endif
112 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
113 const int EPOLL_CTL_ADD = 1;
114 #endif
115 const int SIGILL = 4;
116 const int SIGTRAP = 5;
117 const int SIGABRT = 6;
118 const int SIGFPE = 8;
119 const int SIGSEGV = 11;
120 const int SIGPIPE = 13;
121 const int SIGTERM = 15;
122 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
123 const int SIGBUS = 10;
124 const int SIGSYS = 12;
125 #else
126 const int SIGBUS = 7;
127 const int SIGSYS = 31;
128 #endif
129 void *const MAP_FAILED = (void*)-1;
130 #if SANITIZER_NETBSD
131 const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
132 #elif !SANITIZER_APPLE
133 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
134 #endif
135 const int MAP_FIXED = 0x10;
136 typedef long long_t;
137 typedef __sanitizer::u16 mode_t;
139 // From /usr/include/unistd.h
140 # define F_ULOCK 0 /* Unlock a previously locked region. */
141 # define F_LOCK 1 /* Lock a region for exclusive use. */
142 # define F_TLOCK 2 /* Test and lock a region for exclusive use. */
143 # define F_TEST 3 /* Test a region for other processes locks. */
145 #if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
146 const int SA_SIGINFO = 0x40;
147 const int SIG_SETMASK = 3;
148 #elif defined(__mips__)
149 const int SA_SIGINFO = 8;
150 const int SIG_SETMASK = 3;
151 #else
152 const int SA_SIGINFO = 4;
153 const int SIG_SETMASK = 2;
154 #endif
156 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
157 (!cur_thread_init()->is_inited)
159 namespace __tsan {
160 struct SignalDesc {
161 bool armed;
162 __sanitizer_siginfo siginfo;
163 ucontext_t ctx;
166 struct ThreadSignalContext {
167 int int_signal_send;
168 SignalDesc pending_signals[kSigCount];
169 // emptyset and oldset are too big for stack.
170 __sanitizer_sigset_t emptyset;
171 __sanitizer_sigset_t oldset;
174 void EnterBlockingFunc(ThreadState *thr) {
175 for (;;) {
176 // The order is important so that we do not delay a signal indefinitely if it's
177 // delivered right before we set in_blocking_func. Note: we can't call
178 // ProcessPendingSignals while in_blocking_func is set, or we could end up
179 // handling a signal synchronously while we are already handling a signal.
180 atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
181 if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
182 break;
183 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
184 ProcessPendingSignals(thr);
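// Informal summary: while in_blocking_func is set, incoming signals are
// handled synchronously by the signal handler rather than queued into
// pending_signals; the store/recheck/retry loop above closes the window where
// a signal is queued just before the flag becomes visible.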
188 // The sole reason tsan wraps atexit callbacks is to establish synchronization
189 // between callback setup and callback execution.
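// Informal sketch of that synchronization: setup_at_exit_wrapper() performs
// Release(thr, pc, (uptr)ctx) when the callback is registered, and the wrapper
// functions below perform Acquire(thr, ctx->pc, (uptr)ctx) before invoking the
// user callback, so writes made before atexit()/__cxa_atexit() are considered
// visible inside the callback.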
190 struct AtExitCtx {
191 void (*f)();
192 void *arg;
193 uptr pc;
196 // InterceptorContext holds all global data required for interceptors.
197 // It's explicitly constructed in InitializeInterceptors with placement new
198 // and is never destroyed. This allows usage of members with non-trivial
199 // constructors and destructors.
200 struct InterceptorContext {
201 // The object is 64-byte aligned, because we want hot data to be located
202 // in a single cache line if possible (it's accessed in every interceptor).
203 ALIGNED(64) LibIgnore libignore;
204 __sanitizer_sigaction sigactions[kSigCount];
205 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
206 unsigned finalize_key;
207 #endif
209 Mutex atexit_mu;
210 Vector<struct AtExitCtx *> AtExitStack;
212 InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
215 static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
216 InterceptorContext *interceptor_ctx() {
217 return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
220 LibIgnore *libignore() {
221 return &interceptor_ctx()->libignore;
224 void InitializeLibIgnore() {
225 const SuppressionContext &supp = *Suppressions();
226 const uptr n = supp.SuppressionCount();
227 for (uptr i = 0; i < n; i++) {
228 const Suppression *s = supp.SuppressionAt(i);
229 if (0 == internal_strcmp(s->type, kSuppressionLib))
230 libignore()->AddIgnoredLibrary(s->templ);
232 if (flags()->ignore_noninstrumented_modules)
233 libignore()->IgnoreNoninstrumentedModules(true);
234 libignore()->OnLibraryLoaded(0);
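// For illustration, a matching entry in the user's suppressions file would
// look like (assuming kSuppressionLib maps to the "called_from_lib" type, and
// with a hypothetical library name):
//   called_from_lib:libfoo.so
// which makes the runtime ignore memory accesses performed from that library.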
237 // The following two hooks can be used for cooperative scheduling when
238 // locking.
239 #ifdef TSAN_EXTERNAL_HOOKS
240 void OnPotentiallyBlockingRegionBegin();
241 void OnPotentiallyBlockingRegionEnd();
242 #else
243 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
244 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
245 #endif
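// Under TSAN_EXTERNAL_HOOKS an embedder supplies these hooks itself; a
// hypothetical cooperative scheduler could, for example, define
//   void __tsan::OnPotentiallyBlockingRegionBegin() { MyScheduler::Yield(); }
// (MyScheduler::Yield is purely illustrative) so that it regains control
// whenever tsan is about to block, e.g. on a guard variable.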
247 } // namespace __tsan
249 static ThreadSignalContext *SigCtx(ThreadState *thr) {
250 ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
251 if (ctx == 0 && !thr->is_dead) {
252 ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
253 MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
254 thr->signal_ctx = ctx;
256 return ctx;
259 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
260 uptr pc)
261 : thr_(thr) {
262 LazyInitialize(thr);
263 if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
264 // pthread_join is marked as blocking, but it's also known to call other
265 // intercepted functions (mmap, free). If we don't reset in_blocking_func
266 // we can get deadlocks and memory corruptions if we deliver a synchronous
267 // signal inside of an mmap/free interceptor.
268 // So reset it and restore it back in the destructor.
269 // See https://github.com/google/sanitizers/issues/1540
270 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
271 in_blocking_func_ = true;
273 if (!thr_->is_inited) return;
274 if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
275 DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
276 ignoring_ =
277 !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
278 libignore()->IsIgnored(pc, &in_ignored_lib_));
279 EnableIgnores();
282 ScopedInterceptor::~ScopedInterceptor() {
283 if (!thr_->is_inited) return;
284 DisableIgnores();
285 if (UNLIKELY(in_blocking_func_))
286 EnterBlockingFunc(thr_);
287 if (!thr_->ignore_interceptors) {
288 ProcessPendingSignals(thr_);
289 FuncExit(thr_);
290 CheckedMutex::CheckNoLocks();
294 NOINLINE
295 void ScopedInterceptor::EnableIgnoresImpl() {
296 ThreadIgnoreBegin(thr_, 0);
297 if (flags()->ignore_noninstrumented_modules)
298 thr_->suppress_reports++;
299 if (in_ignored_lib_) {
300 DCHECK(!thr_->in_ignored_lib);
301 thr_->in_ignored_lib = true;
305 NOINLINE
306 void ScopedInterceptor::DisableIgnoresImpl() {
307 ThreadIgnoreEnd(thr_);
308 if (flags()->ignore_noninstrumented_modules)
309 thr_->suppress_reports--;
310 if (in_ignored_lib_) {
311 DCHECK(thr_->in_ignored_lib);
312 thr_->in_ignored_lib = false;
316 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
317 #if SANITIZER_FREEBSD || SANITIZER_NETBSD
318 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
319 #else
320 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
321 #endif
322 #if SANITIZER_FREEBSD
323 # define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
324 INTERCEPT_FUNCTION(_pthread_##func)
325 #else
326 # define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
327 #endif
328 #if SANITIZER_NETBSD
329 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
330 INTERCEPT_FUNCTION(__libc_##func)
331 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
332 INTERCEPT_FUNCTION(__libc_thr_##func)
333 #else
334 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
335 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
336 #endif
338 #define READ_STRING_OF_LEN(thr, pc, s, len, n) \
339 MemoryAccessRange((thr), (pc), (uptr)(s), \
340 common_flags()->strict_string_checks ? (len) + 1 : (n), false)
342 #define READ_STRING(thr, pc, s, n) \
343 READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
345 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
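// Usage note: BLOCK_REAL(foo)(args...) constructs a temporary BlockingCall
// (defined just below) and then calls REAL(foo); the temporary is destroyed at
// the end of the full expression, so signals are delivered synchronously for
// the whole duration of the real blocking call.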
347 struct BlockingCall {
348 explicit BlockingCall(ThreadState *thr)
349 : thr(thr) {
350 EnterBlockingFunc(thr);
351 // When we are in a "blocking call", we process signals asynchronously
352 // (right when they arrive). In this context we do not expect to be
353 // executing any user/runtime code. The known interceptor sequence when
354 // this is not true is: pthread_join -> munmap(stack). It's fine
355 // to ignore munmap in this case -- we handle stack shadow separately.
356 thr->ignore_interceptors++;
359 ~BlockingCall() {
360 thr->ignore_interceptors--;
361 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
364 ThreadState *thr;
367 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
368 SCOPED_TSAN_INTERCEPTOR(sleep, sec);
369 unsigned res = BLOCK_REAL(sleep)(sec);
370 AfterSleep(thr, pc);
371 return res;
374 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
375 SCOPED_TSAN_INTERCEPTOR(usleep, usec);
376 int res = BLOCK_REAL(usleep)(usec);
377 AfterSleep(thr, pc);
378 return res;
381 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
382 SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
383 int res = BLOCK_REAL(nanosleep)(req, rem);
384 AfterSleep(thr, pc);
385 return res;
388 TSAN_INTERCEPTOR(int, pause, int fake) {
389 SCOPED_TSAN_INTERCEPTOR(pause, fake);
390 return BLOCK_REAL(pause)(fake);
393 // Note: we deliberately give the function such a strange "installed_at"
394 // name because in reports it will appear between the callback frames and
395 // the frame that installed the callback.
396 static void at_exit_callback_installed_at() {
397 AtExitCtx *ctx;
399 // Ensure thread-safety.
400 Lock l(&interceptor_ctx()->atexit_mu);
402 // Pop AtExitCtx from the top of the stack of callback functions
403 uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
404 ctx = interceptor_ctx()->AtExitStack[element];
405 interceptor_ctx()->AtExitStack.PopBack();
408 ThreadState *thr = cur_thread();
409 Acquire(thr, ctx->pc, (uptr)ctx);
410 FuncEntry(thr, ctx->pc);
411 ((void(*)())ctx->f)();
412 FuncExit(thr);
413 Free(ctx);
416 static void cxa_at_exit_callback_installed_at(void *arg) {
417 ThreadState *thr = cur_thread();
418 AtExitCtx *ctx = (AtExitCtx*)arg;
419 Acquire(thr, ctx->pc, (uptr)arg);
420 FuncEntry(thr, ctx->pc);
421 ((void(*)(void *arg))ctx->f)(ctx->arg);
422 FuncExit(thr);
423 Free(ctx);
426 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
427 void *arg, void *dso);
429 #if !SANITIZER_ANDROID
430 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
431 if (in_symbolizer())
432 return 0;
433 // We want to setup the atexit callback even if we are in ignored lib
434 // or after fork.
435 SCOPED_INTERCEPTOR_RAW(atexit, f);
436 return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
438 #endif
440 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
441 if (in_symbolizer())
442 return 0;
443 SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
444 return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
447 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
448 void *arg, void *dso) {
449 auto *ctx = New<AtExitCtx>();
450 ctx->f = f;
451 ctx->arg = arg;
452 ctx->pc = pc;
453 Release(thr, pc, (uptr)ctx);
454 // Memory allocation in __cxa_atexit will race with free during exit,
455 // because we do not see synchronization around the atexit callback list.
456 ThreadIgnoreBegin(thr, pc);
457 int res;
458 if (!dso) {
459 // NetBSD does not preserve the 2nd argument if dso is equal to 0
460 // Store ctx in a local stack-like structure
462 // Ensure thread-safety.
463 Lock l(&interceptor_ctx()->atexit_mu);
464 // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
465 // due to atexit_mu being held on exit from the calloc interceptor.
466 ScopedIgnoreInterceptors ignore;
468 res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
469 0, 0);
470 // Push AtExitCtx on the top of the stack of callback functions
471 if (!res) {
472 interceptor_ctx()->AtExitStack.PushBack(ctx);
474 } else {
475 res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
477 ThreadIgnoreEnd(thr);
478 return res;
481 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
482 static void on_exit_callback_installed_at(int status, void *arg) {
483 ThreadState *thr = cur_thread();
484 AtExitCtx *ctx = (AtExitCtx*)arg;
485 Acquire(thr, ctx->pc, (uptr)arg);
486 FuncEntry(thr, ctx->pc);
487 ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
488 FuncExit(thr);
489 Free(ctx);
492 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
493 if (in_symbolizer())
494 return 0;
495 SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
496 auto *ctx = New<AtExitCtx>();
497 ctx->f = (void(*)())f;
498 ctx->arg = arg;
499 ctx->pc = GET_CALLER_PC();
500 Release(thr, pc, (uptr)ctx);
501 // Memory allocation in __cxa_atexit will race with free during exit,
502 // because we do not see synchronization around the atexit callback list.
503 ThreadIgnoreBegin(thr, pc);
504 int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
505 ThreadIgnoreEnd(thr);
506 return res;
508 #define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
509 #else
510 #define TSAN_MAYBE_INTERCEPT_ON_EXIT
511 #endif
513 // Cleanup old bufs.
514 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
515 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
516 JmpBuf *buf = &thr->jmp_bufs[i];
517 if (buf->sp <= sp) {
518 uptr sz = thr->jmp_bufs.Size();
519 internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
520 thr->jmp_bufs.PopBack();
521 i--;
526 static void SetJmp(ThreadState *thr, uptr sp) {
527 if (!thr->is_inited) // called from libc guts during bootstrap
528 return;
529 // Cleanup old bufs.
530 JmpBufGarbageCollect(thr, sp);
531 // Remember the buf.
532 JmpBuf *buf = thr->jmp_bufs.PushBack();
533 buf->sp = sp;
534 buf->shadow_stack_pos = thr->shadow_stack_pos;
535 ThreadSignalContext *sctx = SigCtx(thr);
536 buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
537 buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
538 buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
539 memory_order_relaxed);
542 static void LongJmp(ThreadState *thr, uptr *env) {
543 uptr sp = ExtractLongJmpSp(env);
544 // Find the saved buf with matching sp.
545 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
546 JmpBuf *buf = &thr->jmp_bufs[i];
547 if (buf->sp == sp) {
548 CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
549 // Unwind the stack.
550 while (thr->shadow_stack_pos > buf->shadow_stack_pos)
551 FuncExit(thr);
552 ThreadSignalContext *sctx = SigCtx(thr);
553 if (sctx)
554 sctx->int_signal_send = buf->int_signal_send;
555 atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
556 memory_order_relaxed);
557 atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
558 memory_order_relaxed);
559 JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
560 return;
563 Printf("ThreadSanitizer: can't find longjmp buf\n");
564 CHECK(0);
567 // FIXME: put everything below into a common extern "C" block?
568 extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
570 #if SANITIZER_APPLE
571 TSAN_INTERCEPTOR(int, setjmp, void *env);
572 TSAN_INTERCEPTOR(int, _setjmp, void *env);
573 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
574 #else // SANITIZER_APPLE
576 #if SANITIZER_NETBSD
577 #define setjmp_symname __setjmp14
578 #define sigsetjmp_symname __sigsetjmp14
579 #else
580 #define setjmp_symname setjmp
581 #define sigsetjmp_symname sigsetjmp
582 #endif
584 #define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
585 #define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
586 #define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
587 #define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
589 #define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
590 #define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
592 // Not called. Merely to satisfy TSAN_INTERCEPT().
593 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
594 int TSAN_INTERCEPTOR_SETJMP(void *env);
595 extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
596 CHECK(0);
597 return 0;
600 // FIXME: any reason to have a separate declaration?
601 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
602 int __interceptor__setjmp(void *env);
603 extern "C" int __interceptor__setjmp(void *env) {
604 CHECK(0);
605 return 0;
608 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
609 int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
610 extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
611 CHECK(0);
612 return 0;
615 #if !SANITIZER_NETBSD
616 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
617 int __interceptor___sigsetjmp(void *env);
618 extern "C" int __interceptor___sigsetjmp(void *env) {
619 CHECK(0);
620 return 0;
622 #endif
624 extern "C" int setjmp_symname(void *env);
625 extern "C" int _setjmp(void *env);
626 extern "C" int sigsetjmp_symname(void *env);
627 #if !SANITIZER_NETBSD
628 extern "C" int __sigsetjmp(void *env);
629 #endif
630 DEFINE_REAL(int, setjmp_symname, void *env)
631 DEFINE_REAL(int, _setjmp, void *env)
632 DEFINE_REAL(int, sigsetjmp_symname, void *env)
633 #if !SANITIZER_NETBSD
634 DEFINE_REAL(int, __sigsetjmp, void *env)
635 #endif
636 #endif // SANITIZER_APPLE
638 #if SANITIZER_NETBSD
639 #define longjmp_symname __longjmp14
640 #define siglongjmp_symname __siglongjmp14
641 #else
642 #define longjmp_symname longjmp
643 #define siglongjmp_symname siglongjmp
644 #endif
646 TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
647 // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
648 // bad things will happen. We will jump over ScopedInterceptor dtor and can
649 // leave thr->in_ignored_lib set.
651 SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
653 LongJmp(cur_thread(), env);
654 REAL(longjmp_symname)(env, val);
657 TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
659 SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
661 LongJmp(cur_thread(), env);
662 REAL(siglongjmp_symname)(env, val);
665 #if SANITIZER_NETBSD
666 TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
668 SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
670 LongJmp(cur_thread(), env);
671 REAL(_longjmp)(env, val);
673 #endif
675 #if !SANITIZER_APPLE
676 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
677 if (in_symbolizer())
678 return InternalAlloc(size);
679 void *p = 0;
681 SCOPED_INTERCEPTOR_RAW(malloc, size);
682 p = user_alloc(thr, pc, size);
684 invoke_malloc_hook(p, size);
685 return p;
688 // In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
689 // __libc_memalign so that (1) we can detect races and (2) free will not be
690 // called on blocks allocated internally by libc.
691 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
692 SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
693 return user_memalign(thr, pc, align, sz);
696 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
697 if (in_symbolizer())
698 return InternalCalloc(size, n);
699 void *p = 0;
701 SCOPED_INTERCEPTOR_RAW(calloc, size, n);
702 p = user_calloc(thr, pc, size, n);
704 invoke_malloc_hook(p, n * size);
705 return p;
708 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
709 if (in_symbolizer())
710 return InternalRealloc(p, size);
711 if (p)
712 invoke_free_hook(p);
714 SCOPED_INTERCEPTOR_RAW(realloc, p, size);
715 p = user_realloc(thr, pc, p, size);
717 invoke_malloc_hook(p, size);
718 return p;
721 TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
722 if (in_symbolizer())
723 return InternalReallocArray(p, size, n);
724 if (p)
725 invoke_free_hook(p);
727 SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
728 p = user_reallocarray(thr, pc, p, size, n);
730 invoke_malloc_hook(p, size);
731 return p;
734 TSAN_INTERCEPTOR(void, free, void *p) {
735 if (p == 0)
736 return;
737 if (in_symbolizer())
738 return InternalFree(p);
739 invoke_free_hook(p);
740 SCOPED_INTERCEPTOR_RAW(free, p);
741 user_free(thr, pc, p);
744 TSAN_INTERCEPTOR(void, cfree, void *p) {
745 if (p == 0)
746 return;
747 if (in_symbolizer())
748 return InternalFree(p);
749 invoke_free_hook(p);
750 SCOPED_INTERCEPTOR_RAW(cfree, p);
751 user_free(thr, pc, p);
754 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
755 SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
756 return user_alloc_usable_size(p);
758 #endif
760 TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
761 SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
762 uptr srclen = internal_strlen(src);
763 MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
764 MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
765 return REAL(strcpy)(dst, src);
768 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
769 SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
770 uptr srclen = internal_strnlen(src, n);
771 MemoryAccessRange(thr, pc, (uptr)dst, n, true);
772 MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
773 return REAL(strncpy)(dst, src, n);
776 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
777 SCOPED_TSAN_INTERCEPTOR(strdup, str);
778 // strdup will call malloc, so no instrumentation is required here.
779 return REAL(strdup)(str);
782 // Zero out addr if it points into shadow memory and was provided as a hint
783 // only, i.e., MAP_FIXED is not set.
784 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
785 if (*addr) {
786 if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
787 if (flags & MAP_FIXED) {
788 errno = errno_EINVAL;
789 return false;
790 } else {
791 *addr = 0;
795 return true;
798 template <class Mmap>
799 static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
800 void *addr, SIZE_T sz, int prot, int flags,
801 int fd, OFF64_T off) {
802 if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
803 void *res = real_mmap(addr, sz, prot, flags, fd, off);
804 if (res != MAP_FAILED) {
805 if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
806 Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
807 addr, (void*)sz, res);
808 Die();
810 if (fd > 0) FdAccess(thr, pc, fd);
811 MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
813 return res;
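// Illustrative call shape (the real uses live in the common interceptors,
// e.g. COMMON_INTERCEPTOR_MMAP_IMPL defined later in this file):
//   void *p = mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags,
//                              fd, off);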
816 TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
817 SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
818 UnmapShadow(thr, (uptr)addr, sz);
819 int res = REAL(munmap)(addr, sz);
820 return res;
823 #if SANITIZER_LINUX
824 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
825 SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
826 return user_memalign(thr, pc, align, sz);
828 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
829 #else
830 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
831 #endif
833 #if !SANITIZER_APPLE
834 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
835 if (in_symbolizer())
836 return InternalAlloc(sz, nullptr, align);
837 SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
838 return user_aligned_alloc(thr, pc, align, sz);
841 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
842 if (in_symbolizer())
843 return InternalAlloc(sz, nullptr, GetPageSizeCached());
844 SCOPED_INTERCEPTOR_RAW(valloc, sz);
845 return user_valloc(thr, pc, sz);
847 #endif
849 #if SANITIZER_LINUX
850 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
851 if (in_symbolizer()) {
852 uptr PageSize = GetPageSizeCached();
853 sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
854 return InternalAlloc(sz, nullptr, PageSize);
856 SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
857 return user_pvalloc(thr, pc, sz);
859 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
860 #else
861 #define TSAN_MAYBE_INTERCEPT_PVALLOC
862 #endif
864 #if !SANITIZER_APPLE
865 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
866 if (in_symbolizer()) {
867 void *p = InternalAlloc(sz, nullptr, align);
868 if (!p)
869 return errno_ENOMEM;
870 *memptr = p;
871 return 0;
873 SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
874 return user_posix_memalign(thr, pc, memptr, align, sz);
876 #endif
878 // Both __cxa_guard_acquire and pthread_once 0-initialize
879 // the object initially. pthread_once does not have any
880 // other ABI requirements. __cxa_guard_acquire assumes
881 // that any non-0 value in the first byte means that
882 // initialization is completed. Contents of the remaining
883 // bytes are up to us.
884 constexpr u32 kGuardInit = 0;
885 constexpr u32 kGuardDone = 1;
886 constexpr u32 kGuardRunning = 1 << 16;
887 constexpr u32 kGuardWaiter = 1 << 17;
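// Informal state machine implemented by guard_acquire/guard_release below:
//   kGuardInit --(CAS by the initializing thread)--> kGuardRunning
//   kGuardRunning --(guard_release)--> kGuardDone (or back to kGuardInit on abort)
// Any state may also carry the kGuardWaiter bit; it tells the releasing thread
// that someone is parked in FutexWait() and must be woken via FutexWake().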
889 static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
890 bool blocking_hooks = true) {
891 if (blocking_hooks)
892 OnPotentiallyBlockingRegionBegin();
893 auto on_exit = at_scope_exit([blocking_hooks] {
894 if (blocking_hooks)
895 OnPotentiallyBlockingRegionEnd();
898 for (;;) {
899 u32 cmp = atomic_load(g, memory_order_acquire);
900 if (cmp == kGuardInit) {
901 if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
902 memory_order_relaxed))
903 return 1;
904 } else if (cmp == kGuardDone) {
905 if (!thr->in_ignored_lib)
906 Acquire(thr, pc, (uptr)g);
907 return 0;
908 } else {
909 if ((cmp & kGuardWaiter) ||
910 atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
911 memory_order_relaxed))
912 FutexWait(g, cmp | kGuardWaiter);
917 static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
918 u32 v) {
919 if (!thr->in_ignored_lib)
920 Release(thr, pc, (uptr)g);
921 u32 old = atomic_exchange(g, v, memory_order_release);
922 if (old & kGuardWaiter)
923 FutexWake(g, 1 << 30);
926 // __cxa_guard_acquire and friends need to be intercepted in a special way:
927 // regular interceptors will break statically-linked libstdc++. Linux
928 // interceptors are specially defined as weak functions (so that they don't
929 // cause link errors when the user defines them as well), so they silently
930 // auto-disable themselves when such a symbol is already present in the binary. If
931 // we link libstdc++ statically, it will bring its own __cxa_guard_acquire which
932 // will silently replace our interceptor. That's why on Linux we simply export
933 // these interceptors with INTERFACE_ATTRIBUTE.
934 // On OS X, we don't support static linking, so we just use a regular
935 // interceptor.
936 #if SANITIZER_APPLE
937 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
938 #else
939 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
940 extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
941 #endif
943 // Used in thread-safe function static initialization.
944 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
945 SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
946 return guard_acquire(thr, pc, g);
949 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
950 SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
951 guard_release(thr, pc, g, kGuardDone);
954 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
955 SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
956 guard_release(thr, pc, g, kGuardInit);
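// For context, a compiler typically lowers a function-local static roughly as
// (an informal sketch, not the exact ABI sequence):
//   if (__cxa_guard_acquire(&guard)) {
//     construct the object;
//     __cxa_guard_release(&guard);   // or __cxa_guard_abort on exception
//   }
// which is why the interceptors above treat the guard as an acquire/release
// synchronization point.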
959 namespace __tsan {
960 void DestroyThreadState() {
961 ThreadState *thr = cur_thread();
962 Processor *proc = thr->proc();
963 ThreadFinish(thr);
964 ProcUnwire(proc, thr);
965 ProcDestroy(proc);
966 DTLS_Destroy();
967 cur_thread_finalize();
970 void PlatformCleanUpThreadState(ThreadState *thr) {
971 ThreadSignalContext *sctx = thr->signal_ctx;
972 if (sctx) {
973 thr->signal_ctx = 0;
974 UnmapOrDie(sctx, sizeof(*sctx));
977 } // namespace __tsan
979 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
980 static void thread_finalize(void *v) {
981 uptr iter = (uptr)v;
982 if (iter > 1) {
983 if (pthread_setspecific(interceptor_ctx()->finalize_key,
984 (void*)(iter - 1))) {
985 Printf("ThreadSanitizer: failed to set thread key\n");
986 Die();
988 return;
990 DestroyThreadState();
992 #endif
995 struct ThreadParam {
996 void* (*callback)(void *arg);
997 void *param;
998 Tid tid;
999 Semaphore created;
1000 Semaphore started;
1003 extern "C" void *__tsan_thread_start_func(void *arg) {
1004 ThreadParam *p = (ThreadParam*)arg;
1005 void* (*callback)(void *arg) = p->callback;
1006 void *param = p->param;
1008 ThreadState *thr = cur_thread_init();
1009 // Thread-local state is not initialized yet.
1010 ScopedIgnoreInterceptors ignore;
1011 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
1012 ThreadIgnoreBegin(thr, 0);
1013 if (pthread_setspecific(interceptor_ctx()->finalize_key,
1014 (void *)GetPthreadDestructorIterations())) {
1015 Printf("ThreadSanitizer: failed to set thread key\n");
1016 Die();
1018 ThreadIgnoreEnd(thr);
1019 #endif
1020 p->created.Wait();
1021 Processor *proc = ProcCreate();
1022 ProcWire(proc, thr);
1023 ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
1024 p->started.Post();
1026 void *res = callback(param);
1027 // Prevent the callback from being tail called,
1028 // it mixes up stack traces.
1029 volatile int foo = 42;
1030 foo++;
1031 return res;
1034 TSAN_INTERCEPTOR(int, pthread_create,
1035 void *th, void *attr, void *(*callback)(void*), void * param) {
1036 SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
1038 MaybeSpawnBackgroundThread();
1040 if (ctx->after_multithreaded_fork) {
1041 if (flags()->die_after_fork) {
1042 Report("ThreadSanitizer: starting new threads after multi-threaded "
1043 "fork is not supported. Dying (set die_after_fork=0 to override)\n");
1044 Die();
1045 } else {
1046 VPrintf(1,
1047 "ThreadSanitizer: starting new threads after multi-threaded "
1048 "fork is not supported (pid %lu). Continuing because of "
1049 "die_after_fork=0, but you are on your own\n",
1050 internal_getpid());
1053 __sanitizer_pthread_attr_t myattr;
1054 if (attr == 0) {
1055 pthread_attr_init(&myattr);
1056 attr = &myattr;
1058 int detached = 0;
1059 REAL(pthread_attr_getdetachstate)(attr, &detached);
1060 AdjustStackSize(attr);
1062 ThreadParam p;
1063 p.callback = callback;
1064 p.param = param;
1065 p.tid = kMainTid;
1066 int res = -1;
1068 // Otherwise we see false positives in pthread stack manipulation.
1069 ScopedIgnoreInterceptors ignore;
1070 ThreadIgnoreBegin(thr, pc);
1071 res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
1072 ThreadIgnoreEnd(thr);
1074 if (res == 0) {
1075 p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
1076 CHECK_NE(p.tid, kMainTid);
1077 // Synchronization on p.tid serves two purposes:
1078 // 1. ThreadCreate must finish before the new thread starts.
1079 // Otherwise the new thread can call pthread_detach, but the pthread_t
1080 // identifier is not yet registered in ThreadRegistry by ThreadCreate.
1081 // 2. ThreadStart must finish before this thread continues.
1082 // Otherwise, this thread can call pthread_detach and reset thr->sync
1083 // before the new thread got a chance to acquire from it in ThreadStart.
1084 p.created.Post();
1085 p.started.Wait();
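// Informal timeline of the handshake described above:
//   parent: REAL(pthread_create) -> ThreadCreate() -> created.Post() -> started.Wait()
//   child:  created.Wait() -> ThreadStart() -> started.Post() -> user callback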
1087 if (attr == &myattr)
1088 pthread_attr_destroy(&myattr);
1089 return res;
1092 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1093 SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
1094 Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1095 ThreadIgnoreBegin(thr, pc);
1096 int res = BLOCK_REAL(pthread_join)(th, ret);
1097 ThreadIgnoreEnd(thr);
1098 if (res == 0) {
1099 ThreadJoin(thr, pc, tid);
1101 return res;
1104 DEFINE_REAL_PTHREAD_FUNCTIONS
1106 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1107 SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
1108 Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1109 int res = REAL(pthread_detach)(th);
1110 if (res == 0) {
1111 ThreadDetach(thr, pc, tid);
1113 return res;
1116 TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
1118 SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
1119 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
1120 CHECK_EQ(thr, &cur_thread_placeholder);
1121 #endif
1123 REAL(pthread_exit)(retval);
1126 #if SANITIZER_LINUX
1127 TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
1128 SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
1129 Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1130 ThreadIgnoreBegin(thr, pc);
1131 int res = REAL(pthread_tryjoin_np)(th, ret);
1132 ThreadIgnoreEnd(thr);
1133 if (res == 0)
1134 ThreadJoin(thr, pc, tid);
1135 else
1136 ThreadNotJoined(thr, pc, tid, (uptr)th);
1137 return res;
1140 TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
1141 const struct timespec *abstime) {
1142 SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
1143 Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1144 ThreadIgnoreBegin(thr, pc);
1145 int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
1146 ThreadIgnoreEnd(thr);
1147 if (res == 0)
1148 ThreadJoin(thr, pc, tid);
1149 else
1150 ThreadNotJoined(thr, pc, tid, (uptr)th);
1151 return res;
1153 #endif
1155 // Problem:
1156 // The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
1157 // pthread_cond_t has a different size in the different versions.
1158 // If we call the new REAL functions for an old pthread_cond_t, they will corrupt
1159 // memory after the pthread_cond_t (the old cond is smaller).
1160 // If we call the old REAL functions for a new pthread_cond_t, we will lose some
1161 // functionality (e.g. old functions do not support waiting against
1162 // CLOCK_REALTIME).
1163 // Proper handling would require having 2 versions of the interceptors as well,
1164 // but this is messy, and in particular requires linker scripts when the sanitizer
1165 // runtime is linked into a shared library.
1166 // Instead we assume we don't have dynamic libraries built against the old
1167 // pthread (2.2.5 dates back to 2002) and provide a legacy_pthread_cond flag
1168 // that allows working with old libraries (but this mode does not support
1169 // some features, e.g. pthread_condattr_getpshared).
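// Informal sketch of the scheme implemented by init_cond() below: the first
// word of the user-visible pthread_cond_t is reused as an atomic pointer to a
// heap-allocated, full-size condition variable, and the new REAL functions are
// always invoked on that side object, whatever libc version the application
// was built against.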
1170 static void *init_cond(void *c, bool force = false) {
1171 // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
1172 // So we allocate additional memory on the side large enough to hold
1173 // any pthread_cond_t object. Always call the new REAL functions, but pass
1174 // the aux object to them.
1175 // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes the
1176 // first word of pthread_cond_t to zero.
1177 // All of this is only relevant on Linux.
1178 if (!common_flags()->legacy_pthread_cond)
1179 return c;
1180 atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1181 uptr cond = atomic_load(p, memory_order_acquire);
1182 if (!force && cond != 0)
1183 return (void*)cond;
1184 void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1185 internal_memset(newcond, 0, pthread_cond_t_sz);
1186 if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1187 memory_order_acq_rel))
1188 return newcond;
1189 WRAP(free)(newcond);
1190 return (void*)cond;
1193 namespace {
1195 template <class Fn>
1196 struct CondMutexUnlockCtx {
1197 ScopedInterceptor *si;
1198 ThreadState *thr;
1199 uptr pc;
1200 void *m;
1201 void *c;
1202 const Fn &fn;
1204 int Cancel() const { return fn(); }
1205 void Unlock() const;
1208 template <class Fn>
1209 void CondMutexUnlockCtx<Fn>::Unlock() const {
1210 // The pthread_cond_wait interceptor has enabled async signal delivery
1211 // (see BlockingCall below). Disable async signals since we are running
1212 // tsan code. Also, the ScopedInterceptor and BlockingCall destructors won't run
1213 // since the thread is cancelled, so we have to manually execute them
1214 // (the thread can still run some user code due to pthread_cleanup_push).
1215 CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
1216 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
1217 MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1218 // Undo BlockingCall ctor effects.
1219 thr->ignore_interceptors--;
1220 si->~ScopedInterceptor();
1222 } // namespace
1224 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1225 void *cond = init_cond(c, true);
1226 SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1227 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1228 return REAL(pthread_cond_init)(cond, a);
1231 template <class Fn>
1232 int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
1233 void *c, void *m) {
1234 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1235 MutexUnlock(thr, pc, (uptr)m);
1236 int res = 0;
1237 // This ensures that we handle mutex lock even in case of pthread_cancel.
1238 // See test/tsan/cond_cancel.cpp.
1240 // Enable signal delivery while the thread is blocked.
1241 BlockingCall bc(thr);
1242 CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
1243 res = call_pthread_cancel_with_cleanup(
1244 [](void *arg) -> int {
1245 return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
1247 [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
1248 &arg);
1250 if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1251 MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1252 return res;
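// For illustration, the interceptors below invoke cond_wait() roughly as:
//   cond_wait(thr, pc, &si,
//             [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond, m);
// The Unlock() cleanup above runs only if the thread is cancelled while it is
// blocked inside the real wait.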
1255 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1256 void *cond = init_cond(c);
1257 SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1258 return cond_wait(
1259 thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
1263 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1264 void *cond = init_cond(c);
1265 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1266 return cond_wait(
1267 thr, pc, &si,
1268 [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
1272 #if SANITIZER_LINUX
1273 INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
1274 __sanitizer_clockid_t clock, void *abstime) {
1275 void *cond = init_cond(c);
1276 SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
1277 return cond_wait(
1278 thr, pc, &si,
1279 [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
1280 cond, m);
1282 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
1283 #else
1284 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
1285 #endif
1287 #if SANITIZER_APPLE
1288 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1289 void *reltime) {
1290 void *cond = init_cond(c);
1291 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1292 return cond_wait(
1293 thr, pc, &si,
1294 [=]() {
1295 return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
1297 cond, m);
1299 #endif
1301 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1302 void *cond = init_cond(c);
1303 SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1304 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1305 return REAL(pthread_cond_signal)(cond);
1308 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1309 void *cond = init_cond(c);
1310 SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1311 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1312 return REAL(pthread_cond_broadcast)(cond);
1315 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1316 void *cond = init_cond(c);
1317 SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1318 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1319 int res = REAL(pthread_cond_destroy)(cond);
1320 if (common_flags()->legacy_pthread_cond) {
1321 // Free our aux cond and zero the pointer to not leave dangling pointers.
1322 WRAP(free)(cond);
1323 atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1325 return res;
1328 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1329 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1330 int res = REAL(pthread_mutex_init)(m, a);
1331 if (res == 0) {
1332 u32 flagz = 0;
1333 if (a) {
1334 int type = 0;
1335 if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1336 if (type == PTHREAD_MUTEX_RECURSIVE ||
1337 type == PTHREAD_MUTEX_RECURSIVE_NP)
1338 flagz |= MutexFlagWriteReentrant;
1340 MutexCreate(thr, pc, (uptr)m, flagz);
1342 return res;
1345 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1346 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1347 int res = REAL(pthread_mutex_destroy)(m);
1348 if (res == 0 || res == errno_EBUSY) {
1349 MutexDestroy(thr, pc, (uptr)m);
1351 return res;
1354 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1355 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1356 int res = REAL(pthread_mutex_trylock)(m);
1357 if (res == errno_EOWNERDEAD)
1358 MutexRepair(thr, pc, (uptr)m);
1359 if (res == 0 || res == errno_EOWNERDEAD)
1360 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1361 return res;
1364 #if !SANITIZER_APPLE
1365 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1366 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1367 int res = REAL(pthread_mutex_timedlock)(m, abstime);
1368 if (res == 0) {
1369 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1371 return res;
1373 #endif
1375 #if !SANITIZER_APPLE
1376 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1377 SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1378 int res = REAL(pthread_spin_init)(m, pshared);
1379 if (res == 0) {
1380 MutexCreate(thr, pc, (uptr)m);
1382 return res;
1385 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1386 SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1387 int res = REAL(pthread_spin_destroy)(m);
1388 if (res == 0) {
1389 MutexDestroy(thr, pc, (uptr)m);
1391 return res;
1394 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1395 SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1396 MutexPreLock(thr, pc, (uptr)m);
1397 int res = REAL(pthread_spin_lock)(m);
1398 if (res == 0) {
1399 MutexPostLock(thr, pc, (uptr)m);
1401 return res;
1404 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1405 SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1406 int res = REAL(pthread_spin_trylock)(m);
1407 if (res == 0) {
1408 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1410 return res;
1413 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1414 SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1415 MutexUnlock(thr, pc, (uptr)m);
1416 int res = REAL(pthread_spin_unlock)(m);
1417 return res;
1419 #endif
1421 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1422 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1423 int res = REAL(pthread_rwlock_init)(m, a);
1424 if (res == 0) {
1425 MutexCreate(thr, pc, (uptr)m);
1427 return res;
1430 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1431 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1432 int res = REAL(pthread_rwlock_destroy)(m);
1433 if (res == 0) {
1434 MutexDestroy(thr, pc, (uptr)m);
1436 return res;
1439 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1440 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1441 MutexPreReadLock(thr, pc, (uptr)m);
1442 int res = REAL(pthread_rwlock_rdlock)(m);
1443 if (res == 0) {
1444 MutexPostReadLock(thr, pc, (uptr)m);
1446 return res;
1449 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1450 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1451 int res = REAL(pthread_rwlock_tryrdlock)(m);
1452 if (res == 0) {
1453 MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
1455 return res;
1458 #if !SANITIZER_APPLE
1459 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1460 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1461 int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1462 if (res == 0) {
1463 MutexPostReadLock(thr, pc, (uptr)m);
1465 return res;
1467 #endif
1469 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1470 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1471 MutexPreLock(thr, pc, (uptr)m);
1472 int res = REAL(pthread_rwlock_wrlock)(m);
1473 if (res == 0) {
1474 MutexPostLock(thr, pc, (uptr)m);
1476 return res;
1479 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1480 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1481 int res = REAL(pthread_rwlock_trywrlock)(m);
1482 if (res == 0) {
1483 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1485 return res;
1488 #if !SANITIZER_APPLE
1489 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1490 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1491 int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1492 if (res == 0) {
1493 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1495 return res;
1497 #endif
1499 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1500 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1501 MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1502 int res = REAL(pthread_rwlock_unlock)(m);
1503 return res;
1506 #if !SANITIZER_APPLE
1507 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1508 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1509 MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1510 int res = REAL(pthread_barrier_init)(b, a, count);
1511 return res;
1514 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1515 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1516 MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1517 int res = REAL(pthread_barrier_destroy)(b);
1518 return res;
1521 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1522 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1523 Release(thr, pc, (uptr)b);
1524 MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1525 int res = REAL(pthread_barrier_wait)(b);
1526 MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1527 if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1528 Acquire(thr, pc, (uptr)b);
1530 return res;
1532 #endif
1534 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1535 SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1536 if (o == 0 || f == 0)
1537 return errno_EINVAL;
1538 atomic_uint32_t *a;
1540 if (SANITIZER_APPLE)
1541 a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1542 else if (SANITIZER_NETBSD)
1543 a = static_cast<atomic_uint32_t*>
1544 ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1545 else
1546 a = static_cast<atomic_uint32_t*>(o);
1548 // Mac OS X appears to use pthread_once() in contexts where calling the
1549 // BlockingRegion hooks results in crashes due to too little stack space.
1550 if (guard_acquire(thr, pc, a, !SANITIZER_APPLE)) {
1551 (*f)();
1552 guard_release(thr, pc, a, kGuardDone);
1554 return 0;
1557 #if SANITIZER_GLIBC
1558 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1559 SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1560 if (fd > 0)
1561 FdAccess(thr, pc, fd);
1562 return REAL(__fxstat)(version, fd, buf);
1564 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
1565 #else
1566 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1567 #endif
1569 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1570 #if SANITIZER_GLIBC
1571 SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
1572 if (fd > 0)
1573 FdAccess(thr, pc, fd);
1574 return REAL(__fxstat)(0, fd, buf);
1575 #else
1576 SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1577 if (fd > 0)
1578 FdAccess(thr, pc, fd);
1579 return REAL(fstat)(fd, buf);
1580 #endif
1583 #if SANITIZER_GLIBC
1584 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1585 SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1586 if (fd > 0)
1587 FdAccess(thr, pc, fd);
1588 return REAL(__fxstat64)(version, fd, buf);
1590 #define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
1591 #else
1592 #define TSAN_MAYBE_INTERCEPT___FXSTAT64
1593 #endif
1595 #if SANITIZER_GLIBC
1596 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1597 SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
1598 if (fd > 0)
1599 FdAccess(thr, pc, fd);
1600 return REAL(__fxstat64)(0, fd, buf);
1602 #define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1603 #else
1604 #define TSAN_MAYBE_INTERCEPT_FSTAT64
1605 #endif
1607 TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
1608 va_list ap;
1609 va_start(ap, oflag);
1610 mode_t mode = va_arg(ap, int);
1611 va_end(ap);
1612 SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
1613 READ_STRING(thr, pc, name, 0);
1614 int fd = REAL(open)(name, oflag, mode);
1615 if (fd >= 0)
1616 FdFileCreate(thr, pc, fd);
1617 return fd;
1620 #if SANITIZER_LINUX
1621 TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
1622 va_list ap;
1623 va_start(ap, oflag);
1624 mode_t mode = va_arg(ap, int);
1625 va_end(ap);
1626 SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
1627 READ_STRING(thr, pc, name, 0);
1628 int fd = REAL(open64)(name, oflag, mode);
1629 if (fd >= 0)
1630 FdFileCreate(thr, pc, fd);
1631 return fd;
1633 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1634 #else
1635 #define TSAN_MAYBE_INTERCEPT_OPEN64
1636 #endif
1638 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1639 SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1640 READ_STRING(thr, pc, name, 0);
1641 int fd = REAL(creat)(name, mode);
1642 if (fd >= 0)
1643 FdFileCreate(thr, pc, fd);
1644 return fd;
1647 #if SANITIZER_LINUX
1648 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1649 SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1650 READ_STRING(thr, pc, name, 0);
1651 int fd = REAL(creat64)(name, mode);
1652 if (fd >= 0)
1653 FdFileCreate(thr, pc, fd);
1654 return fd;
1656 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1657 #else
1658 #define TSAN_MAYBE_INTERCEPT_CREAT64
1659 #endif
1661 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1662 SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1663 int newfd = REAL(dup)(oldfd);
1664 if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1665 FdDup(thr, pc, oldfd, newfd, true);
1666 return newfd;
1669 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1670 SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1671 int newfd2 = REAL(dup2)(oldfd, newfd);
1672 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1673 FdDup(thr, pc, oldfd, newfd2, false);
1674 return newfd2;
1677 #if !SANITIZER_APPLE
1678 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1679 SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1680 int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1681 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1682 FdDup(thr, pc, oldfd, newfd2, false);
1683 return newfd2;
1685 #endif
1687 #if SANITIZER_LINUX
1688 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1689 SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1690 int fd = REAL(eventfd)(initval, flags);
1691 if (fd >= 0)
1692 FdEventCreate(thr, pc, fd);
1693 return fd;
1695 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1696 #else
1697 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1698 #endif
1700 #if SANITIZER_LINUX
1701 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1702 SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
1703 FdClose(thr, pc, fd);
1704 fd = REAL(signalfd)(fd, mask, flags);
1705 if (!MustIgnoreInterceptor(thr))
1706 FdSignalCreate(thr, pc, fd);
1707 return fd;
1709 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1710 #else
1711 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1712 #endif
1714 #if SANITIZER_LINUX
1715 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1716 SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1717 int fd = REAL(inotify_init)(fake);
1718 if (fd >= 0)
1719 FdInotifyCreate(thr, pc, fd);
1720 return fd;
1722 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1723 #else
1724 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1725 #endif
1727 #if SANITIZER_LINUX
1728 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1729 SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1730 int fd = REAL(inotify_init1)(flags);
1731 if (fd >= 0)
1732 FdInotifyCreate(thr, pc, fd);
1733 return fd;
1735 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1736 #else
1737 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1738 #endif
1740 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1741 SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1742 int fd = REAL(socket)(domain, type, protocol);
1743 if (fd >= 0)
1744 FdSocketCreate(thr, pc, fd);
1745 return fd;
1748 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1749 SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1750 int res = REAL(socketpair)(domain, type, protocol, fd);
1751 if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1752 FdPipeCreate(thr, pc, fd[0], fd[1]);
1753 return res;
1756 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1757 SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1758 FdSocketConnecting(thr, pc, fd);
1759 int res = REAL(connect)(fd, addr, addrlen);
1760 if (res == 0 && fd >= 0)
1761 FdSocketConnect(thr, pc, fd);
1762 return res;
1765 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1766 SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1767 int res = REAL(bind)(fd, addr, addrlen);
1768 if (fd > 0 && res == 0)
1769 FdAccess(thr, pc, fd);
1770 return res;
1773 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1774 SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1775 int res = REAL(listen)(fd, backlog);
1776 if (fd > 0 && res == 0)
1777 FdAccess(thr, pc, fd);
1778 return res;
1781 TSAN_INTERCEPTOR(int, close, int fd) {
1782 SCOPED_INTERCEPTOR_RAW(close, fd);
1783 if (!in_symbolizer())
1784 FdClose(thr, pc, fd);
1785 return REAL(close)(fd);
1788 #if SANITIZER_LINUX
1789 TSAN_INTERCEPTOR(int, __close, int fd) {
1790 SCOPED_INTERCEPTOR_RAW(__close, fd);
1791 FdClose(thr, pc, fd);
1792 return REAL(__close)(fd);
1794 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1795 #else
1796 #define TSAN_MAYBE_INTERCEPT___CLOSE
1797 #endif
1799 // glibc guts
1800 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1801 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1802 SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
1803 int fds[64];
1804 int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1805 for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
1806 REAL(__res_iclose)(state, free_addr);
1808 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1809 #else
1810 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1811 #endif
1813 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1814 SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1815 int res = REAL(pipe)(pipefd);
1816 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1817 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1818 return res;
1821 #if !SANITIZER_APPLE
1822 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1823 SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1824 int res = REAL(pipe2)(pipefd, flags);
1825 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1826 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1827 return res;
1829 #endif
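// A minimal sketch of the edge modeled above, assuming a hypothetical
// standalone test program (not part of this file): both pipe ends are
// registered with FdPipeCreate, and the read/write interceptors in
// sanitizer_common do FdRelease/FdAcquire on the descriptor, so a write() on
// one end followed by a read() on the other orders the surrounding accesses.
#if 0  // illustrative sketch only
#include <pthread.h>
#include <unistd.h>

static int pipefd[2];
static int data;

static void *producer(void *) {
  data = 42;                 // plain write, not otherwise synchronized
  char c = 0;
  write(pipefd[1], &c, 1);   // release: publishes the write above
  return nullptr;
}

int main() {
  pipe(pipefd);
  pthread_t t;
  pthread_create(&t, nullptr, producer, nullptr);
  char c;
  read(pipefd[0], &c, 1);    // acquire: pairs with the producer's write()
  int v = data;              // no race reported thanks to the pipe edge
  pthread_join(t, nullptr);
  return v == 42 ? 0 : 1;
}
#endif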
1831 TSAN_INTERCEPTOR(int, unlink, char *path) {
1832 SCOPED_TSAN_INTERCEPTOR(unlink, path);
1833 Release(thr, pc, File2addr(path));
1834 int res = REAL(unlink)(path);
1835 return res;
1838 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1839 SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1840 void *res = REAL(tmpfile)(fake);
1841 if (res) {
1842 int fd = fileno_unlocked(res);
1843 if (fd >= 0)
1844 FdFileCreate(thr, pc, fd);
1846 return res;
1849 #if SANITIZER_LINUX
1850 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1851 SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1852 void *res = REAL(tmpfile64)(fake);
1853 if (res) {
1854 int fd = fileno_unlocked(res);
1855 if (fd >= 0)
1856 FdFileCreate(thr, pc, fd);
1858 return res;
1860 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1861 #else
1862 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1863 #endif
1865 static void FlushStreams() {
1866 // Flushing all the streams here may freeze the process if a child thread is
1867 // performing file stream operations at the same time.
1868 REAL(fflush)(stdout);
1869 REAL(fflush)(stderr);
1872 TSAN_INTERCEPTOR(void, abort, int fake) {
1873 SCOPED_TSAN_INTERCEPTOR(abort, fake);
1874 FlushStreams();
1875 REAL(abort)(fake);
1878 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1879 SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1880 Release(thr, pc, Dir2addr(path));
1881 int res = REAL(rmdir)(path);
1882 return res;
1885 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1886 SCOPED_INTERCEPTOR_RAW(closedir, dirp);
1887 if (dirp) {
1888 int fd = dirfd(dirp);
1889 FdClose(thr, pc, fd);
1891 return REAL(closedir)(dirp);
1894 #if SANITIZER_LINUX
1895 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1896 SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1897 int fd = REAL(epoll_create)(size);
1898 if (fd >= 0)
1899 FdPollCreate(thr, pc, fd);
1900 return fd;
1903 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1904 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1905 int fd = REAL(epoll_create1)(flags);
1906 if (fd >= 0)
1907 FdPollCreate(thr, pc, fd);
1908 return fd;
1911 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1912 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1913 if (epfd >= 0)
1914 FdAccess(thr, pc, epfd);
1915 if (epfd >= 0 && fd >= 0)
1916 FdAccess(thr, pc, fd);
1917 if (op == EPOLL_CTL_ADD && epfd >= 0) {
1918 FdPollAdd(thr, pc, epfd, fd);
1919 FdRelease(thr, pc, epfd);
1921 int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1922 return res;
1925 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1926 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1927 if (epfd >= 0)
1928 FdAccess(thr, pc, epfd);
1929 int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1930 if (res > 0 && epfd >= 0)
1931 FdAcquire(thr, pc, epfd);
1932 return res;
1935 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1936 void *sigmask) {
1937 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1938 if (epfd >= 0)
1939 FdAccess(thr, pc, epfd);
1940 int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1941 if (res > 0 && epfd >= 0)
1942 FdAcquire(thr, pc, epfd);
1943 return res;
1946 #define TSAN_MAYBE_INTERCEPT_EPOLL \
1947 TSAN_INTERCEPT(epoll_create); \
1948 TSAN_INTERCEPT(epoll_create1); \
1949 TSAN_INTERCEPT(epoll_ctl); \
1950 TSAN_INTERCEPT(epoll_wait); \
1951 TSAN_INTERCEPT(epoll_pwait)
1952 #else
1953 #define TSAN_MAYBE_INTERCEPT_EPOLL
1954 #endif
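// A minimal sketch of the contract modeled above, assuming a hypothetical
// standalone Linux test program (not part of this file): data written before
// epoll_ctl(EPOLL_CTL_ADD) is released on the epoll descriptor, and the thread
// that epoll_wait() wakes acquires it, so no race is reported on `payload`.
#if 0  // illustrative sketch only
#include <pthread.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include <cstdint>

static int epfd, evfd;
static long payload;

static void *waiter(void *) {
  epoll_event ev;
  // FdAcquire(epfd) runs when epoll_wait returns ready events, pairing with
  // the FdRelease(epfd) done by the epoll_ctl(EPOLL_CTL_ADD) interceptor.
  if (epoll_wait(epfd, &ev, 1, -1) == 1)
    return (void *)payload;  // ordered after the write in main, no race
  return nullptr;
}

int main() {
  epfd = epoll_create1(0);
  evfd = eventfd(0, 0);
  pthread_t t;
  pthread_create(&t, nullptr, waiter, nullptr);
  payload = 42;              // plain write, ordered only by the epoll edge
  epoll_event ev = {};
  ev.events = EPOLLIN;
  ev.data.fd = evfd;
  epoll_ctl(epfd, EPOLL_CTL_ADD, evfd, &ev);  // FdRelease(epfd) under tsan
  uint64_t one = 1;
  write(evfd, &one, sizeof(one));             // wakes the waiter
  pthread_join(t, nullptr);
  return 0;
}
#endif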
1956 // The following functions are intercepted merely to process pending signals.
1957 // If the program blocks signal X, we must deliver the signal before the function
1958 // returns. Similarly, if the program unblocks a signal (or returns from sigsuspend),
1959 // it's better to deliver the signal straight away.
1960 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
1961 SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
1962 return REAL(sigsuspend)(mask);
1965 TSAN_INTERCEPTOR(int, sigblock, int mask) {
1966 SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
1967 return REAL(sigblock)(mask);
1970 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
1971 SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
1972 return REAL(sigsetmask)(mask);
1975 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
1976 __sanitizer_sigset_t *oldset) {
1977 SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
1978 return REAL(pthread_sigmask)(how, set, oldset);
1981 namespace __tsan {
1983 static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
1984 VarSizeStackTrace stack;
1985 // StackTrace::GetNextInstructionPc(pc) is used because a return address is
1986 // expected; OutputReport() will undo this.
1987 ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
1988 ThreadRegistryLock l(&ctx->thread_registry);
1989 ScopedReport rep(ReportTypeErrnoInSignal);
1990 rep.SetSigNum(sig);
1991 if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
1992 rep.AddStack(stack, true);
1993 OutputReport(thr, rep);
1997 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
1998 int sig, __sanitizer_siginfo *info,
1999 void *uctx) {
2000 CHECK(thr->slot);
2001 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2002 if (acquire)
2003 Acquire(thr, 0, (uptr)&sigactions[sig]);
2004 // Signals are generally asynchronous, so if we receive a signal when
2005 // ignores are enabled we should disable ignores. This is critical both for
2006 // sync and for interceptors, because otherwise we can miss synchronization
2007 // and report false races.
2008 int ignore_reads_and_writes = thr->ignore_reads_and_writes;
2009 int ignore_interceptors = thr->ignore_interceptors;
2010 int ignore_sync = thr->ignore_sync;
2011 // For symbolizer we only process SIGSEGVs synchronously
2012 // (bug in symbolizer or in tsan). But we want to reset
2013 // in_symbolizer to fail gracefully. Symbolizer and user code
2014 // use different memory allocators, so if we don't reset
2015 // in_symbolizer we can get memory allocated with one being
2016 // freed with the other, which can cause more crashes.
2017 int in_symbolizer = thr->in_symbolizer;
2018 if (!ctx->after_multithreaded_fork) {
2019 thr->ignore_reads_and_writes = 0;
2020 thr->fast_state.ClearIgnoreBit();
2021 thr->ignore_interceptors = 0;
2022 thr->ignore_sync = 0;
2023 thr->in_symbolizer = 0;
2025 // Ensure that the handler does not spoil errno.
2026 const int saved_errno = errno;
2027 errno = 99;
2028 // This code races with sigaction. Be careful not to read sa_sigaction twice.
2029 // Also need to remember pc for reporting before the call,
2030 // because the handler can reset it.
2031 volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
2032 ? (uptr)sigactions[sig].sigaction
2033 : (uptr)sigactions[sig].handler;
2034 if (pc != sig_dfl && pc != sig_ign) {
2035 // The callback can be either sa_handler or sa_sigaction.
2036 // They have different signatures, but we assume that passing
2037 // additional arguments to sa_handler works and is harmless.
2038 ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
2040 if (!ctx->after_multithreaded_fork) {
2041 thr->ignore_reads_and_writes = ignore_reads_and_writes;
2042 if (ignore_reads_and_writes)
2043 thr->fast_state.SetIgnoreBit();
2044 thr->ignore_interceptors = ignore_interceptors;
2045 thr->ignore_sync = ignore_sync;
2046 thr->in_symbolizer = in_symbolizer;
2048 // We do not detect errno spoiling for SIGTERM,
2049 // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
2050 // and tsan would report a false positive in such cases.
2051 // It's difficult to properly detect this situation (reraise),
2052 // because in the async signal processing case (when the handler is called
2053 // directly from rtl_generic_sighandler) we have not yet received the reraised
2054 // signal; and it looks too fragile to intercept all ways to reraise a signal.
2055 if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
2056 errno != 99)
2057 ReportErrnoSpoiling(thr, pc, sig);
2058 errno = saved_errno;
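// A hypothetical user handler that would trigger the check above (sketch only,
// compiled separately, not part of this file): the wrapper sets errno to the
// sentinel 99 before the call, so a handler that returns with errno changed on
// an asynchronously delivered signal produces a ReportTypeErrnoInSignal report.
#if 0  // illustrative sketch only
#include <signal.h>
#include <sys/time.h>
#include <unistd.h>
#include <cerrno>

// Clobbers errno and does not restore it: read(-1, ...) fails with EBADF.
static void bad_handler(int) {
  char c;
  read(-1, &c, 1);
}

int main() {
  struct sigaction sa = {};
  sa.sa_handler = bad_handler;
  sigaction(SIGPROF, &sa, nullptr);
  // Deliver SIGPROF asynchronously via a profiling timer. raise() or kill()
  // to ourselves would be classified as synchronous (int_signal_send below),
  // and the errno check above is skipped for synchronous signals.
  struct itimerval it = {};
  it.it_value.tv_usec = 10000;  // one-shot, fires after ~10ms of CPU time
  setitimer(ITIMER_PROF, &it, nullptr);
  for (volatile long i = 0; i < 200000000; i++) {
  }  // burn CPU until the timer fires
  // The fix is for the handler to save errno on entry and restore it on exit.
  return 0;
}
#endif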
2061 void ProcessPendingSignalsImpl(ThreadState *thr) {
2062 atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
2063 ThreadSignalContext *sctx = SigCtx(thr);
2064 if (sctx == 0)
2065 return;
2066 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2067 internal_sigfillset(&sctx->emptyset);
2068 int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
2069 CHECK_EQ(res, 0);
2070 for (int sig = 0; sig < kSigCount; sig++) {
2071 SignalDesc *signal = &sctx->pending_signals[sig];
2072 if (signal->armed) {
2073 signal->armed = false;
2074 CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
2075 &signal->ctx);
2078 res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
2079 CHECK_EQ(res, 0);
2080 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2083 } // namespace __tsan
2085 static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
2086 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
2087 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
2088 // If we are sending a signal to ourselves, we must process it now.
2089 (sctx && sig == sctx->int_signal_send);
2092 void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2093 ThreadState *thr = cur_thread_init();
2094 ThreadSignalContext *sctx = SigCtx(thr);
2095 if (sig < 0 || sig >= kSigCount) {
2096 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
2097 return;
2099 // Don't mess with synchronous signals.
2100 const bool sync = is_sync_signal(sctx, sig);
2101 if (sync ||
2102 // If we are in blocking function, we can safely process it now
2103 // (but check if we are in a recursive interceptor,
2104 // i.e. pthread_join()->munmap()).
2105 atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2106 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2107 if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2108 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2109 CallUserSignalHandler(thr, sync, true, sig, info, ctx);
2110 atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
2111 } else {
2112 // Be very conservative about when we do an acquire in this case.
2113 // It's unsafe to do an acquire in async handlers, because ThreadState
2114 // can be in an inconsistent state.
2115 // SIGSYS looks relatively safe -- it's synchronous and can actually
2116 // need some global state.
2117 bool acq = (sig == SIGSYS);
2118 CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
2120 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2121 return;
2124 if (sctx == 0)
2125 return;
2126 SignalDesc *signal = &sctx->pending_signals[sig];
2127 if (signal->armed == false) {
2128 signal->armed = true;
2129 internal_memcpy(&signal->siginfo, info, sizeof(*info));
2130 internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
2131 atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
2135 TSAN_INTERCEPTOR(int, raise, int sig) {
2136 SCOPED_TSAN_INTERCEPTOR(raise, sig);
2137 ThreadSignalContext *sctx = SigCtx(thr);
2138 CHECK_NE(sctx, 0);
2139 int prev = sctx->int_signal_send;
2140 sctx->int_signal_send = sig;
2141 int res = REAL(raise)(sig);
2142 CHECK_EQ(sctx->int_signal_send, sig);
2143 sctx->int_signal_send = prev;
2144 return res;
2147 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2148 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2149 ThreadSignalContext *sctx = SigCtx(thr);
2150 CHECK_NE(sctx, 0);
2151 int prev = sctx->int_signal_send;
2152 if (pid == (int)internal_getpid()) {
2153 sctx->int_signal_send = sig;
2155 int res = REAL(kill)(pid, sig);
2156 if (pid == (int)internal_getpid()) {
2157 CHECK_EQ(sctx->int_signal_send, sig);
2158 sctx->int_signal_send = prev;
2160 return res;
2163 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2164 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2165 ThreadSignalContext *sctx = SigCtx(thr);
2166 CHECK_NE(sctx, 0);
2167 int prev = sctx->int_signal_send;
2168 bool self = pthread_equal(tid, pthread_self());
2169 if (self)
2170 sctx->int_signal_send = sig;
2171 int res = REAL(pthread_kill)(tid, sig);
2172 if (self) {
2173 CHECK_EQ(sctx->int_signal_send, sig);
2174 sctx->int_signal_send = prev;
2176 return res;
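// Sketch of the effect of int_signal_send above (hypothetical user code,
// compiled separately, not part of this file): a self-sent signal is
// classified as synchronous by is_sync_signal(), so the handler runs inside
// raise() instead of being queued in pending_signals for later delivery.
#if 0  // illustrative sketch only
#include <csignal>
#include <cstdio>

static volatile sig_atomic_t fired;
static void on_usr1(int) { fired = 1; }

int main() {
  signal(SIGUSR1, on_usr1);  // routed through sighandler() by tsan
  raise(SIGUSR1);            // int_signal_send == SIGUSR1 -> processed now
  std::printf("handler already ran: %d\n", (int)fired);  // prints 1
  return fired ? 0 : 1;
}
#endif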
2179 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2180 SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2181 // It's intercepted merely to process pending signals.
2182 return REAL(gettimeofday)(tv, tz);
2185 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2186 void *hints, void *rv) {
2187 SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2188 // We miss atomic synchronization in getaddrinfo,
2189 // and can report a false race between malloc and free
2190 // inside getaddrinfo. So ignore memory accesses.
2191 ThreadIgnoreBegin(thr, pc);
2192 int res = REAL(getaddrinfo)(node, service, hints, rv);
2193 ThreadIgnoreEnd(thr);
2194 return res;
2197 TSAN_INTERCEPTOR(int, fork, int fake) {
2198 if (in_symbolizer())
2199 return REAL(fork)(fake);
2200 SCOPED_INTERCEPTOR_RAW(fork, fake);
2201 return REAL(fork)(fake);
2204 void atfork_prepare() {
2205 if (in_symbolizer())
2206 return;
2207 ThreadState *thr = cur_thread();
2208 const uptr pc = StackTrace::GetCurrentPc();
2209 ForkBefore(thr, pc);
2212 void atfork_parent() {
2213 if (in_symbolizer())
2214 return;
2215 ThreadState *thr = cur_thread();
2216 const uptr pc = StackTrace::GetCurrentPc();
2217 ForkParentAfter(thr, pc);
2220 void atfork_child() {
2221 if (in_symbolizer())
2222 return;
2223 ThreadState *thr = cur_thread();
2224 const uptr pc = StackTrace::GetCurrentPc();
2225 ForkChildAfter(thr, pc, true);
2226 FdOnFork(thr, pc);
2229 #if !SANITIZER_IOS
2230 TSAN_INTERCEPTOR(int, vfork, int fake) {
2231 // Some programs (e.g. openjdk) call close for all file descriptors
2232 // in the child process. Under tsan it leads to false positives, because
2233 // address space is shared, so the parent process also thinks that
2234 // the descriptors are closed (while they are actually not).
2235 // This leads to false positives due to missed synchronization.
2236 // Strictly speaking, this is undefined behavior, because the vfork child is not
2237 // allowed to call any functions other than exec/exit. But this is what
2238 // openjdk does, so we want to handle it.
2239 // We could disable interceptors in the child process. But it's not possible
2240 // to simply intercept and wrap vfork, because vfork child is not allowed
2241 // to return from the function that calls vfork, and that's exactly what
2242 // we would do. So this would require some assembly trickery as well.
2243 // Instead we simply turn vfork into fork.
2244 return WRAP(fork)(fake);
2246 #endif
2248 #if SANITIZER_LINUX
2249 TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2250 void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2251 SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2252 child_tid);
2253 struct Arg {
2254 int (*fn)(void *);
2255 void *arg;
2257 auto wrapper = +[](void *p) -> int {
2258 auto *thr = cur_thread();
2259 uptr pc = GET_CURRENT_PC();
2260 // Start the background thread for fork, but not for clone.
2261 // For fork we did this always and it's known to work (or user code has
2262 // adapted). But if we do this for the new clone interceptor, some code
2263 // (sandbox2) fails. So model what we used to do for years and don't start the
2264 // background thread after clone.
2265 ForkChildAfter(thr, pc, false);
2266 FdOnFork(thr, pc);
2267 auto *arg = static_cast<Arg *>(p);
2268 return arg->fn(arg->arg);
2270 ForkBefore(thr, pc);
2271 Arg arg_wrapper = {fn, arg};
2272 int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
2273 child_tid);
2274 ForkParentAfter(thr, pc);
2275 return pid;
2277 #endif
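// The callback-wrapping idiom used above, shown standalone with hypothetical
// names (sketch only, not part of this file): a capture-less lambda converted
// to a plain function pointer with unary '+' lets extra per-child work run
// before the user callback, while the original (fn, arg) pair is smuggled
// through a small struct, since clone()-style APIs accept only one void*.
#if 0  // illustrative sketch only
#include <cstdio>

// Stand-in for a C API that, like clone(), takes int(*)(void*) plus one void*.
static int call_c_style(int (*fn)(void *), void *arg) { return fn(arg); }

struct Arg {
  int (*fn)(void *);
  void *arg;
};

static int invoke_wrapped(int (*fn)(void *), void *arg) {
  auto wrapper = +[](void *p) -> int {
    std::puts("per-child setup would run here (cf. ForkChildAfter/FdOnFork)");
    auto *a = static_cast<Arg *>(p);
    return a->fn(a->arg);
  };
  Arg packed = {fn, arg};
  return call_c_style(wrapper, &packed);
}

static int user_fn(void *p) { return *static_cast<int *>(p); }

int main() {
  int x = 7;
  return invoke_wrapped(user_fn, &x) == 7 ? 0 : 1;
}
#endif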
2279 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2280 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2281 void *data);
2282 struct dl_iterate_phdr_data {
2283 ThreadState *thr;
2284 uptr pc;
2285 dl_iterate_phdr_cb_t cb;
2286 void *data;
2289 static bool IsAppNotRodata(uptr addr) {
2290 return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
2293 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2294 void *data) {
2295 dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2296 // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2297 // accessible in the dl_iterate_phdr callback. But we don't see synchronization
2298 // inside the dynamic linker, so we "unpoison" it here in order to not
2299 // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
2300 // because some libc functions call __libc_dlopen.
2301 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2302 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2303 internal_strlen(info->dlpi_name));
2304 int res = cbdata->cb(info, size, cbdata->data);
2305 // Perform the check one more time in case info->dlpi_name was overwritten
2306 // by user callback.
2307 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2308 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2309 internal_strlen(info->dlpi_name));
2310 return res;
2313 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2314 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2315 dl_iterate_phdr_data cbdata;
2316 cbdata.thr = thr;
2317 cbdata.pc = pc;
2318 cbdata.cb = cb;
2319 cbdata.data = data;
2320 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2321 return res;
2323 #endif
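// Typical user code that reaches the wrapper above (hypothetical sketch,
// compiled separately, not part of this file): the callback reads
// info->dlpi_name, which points into dynamic-linker-internal memory; the
// interceptor resets its shadow before and after the callback so no stale
// race is reported on that buffer.
#if 0  // illustrative sketch only
#include <link.h>
#include <cstdio>

static int print_module(struct dl_phdr_info *info, size_t, void *count) {
  std::printf("module: %s (%d segments)\n",
              info->dlpi_name[0] ? info->dlpi_name : "[main executable]",
              (int)info->dlpi_phnum);
  ++*static_cast<int *>(count);
  return 0;  // returning non-zero stops the iteration
}

int main() {
  int count = 0;
  dl_iterate_phdr(print_module, &count);
  std::printf("%d modules visited\n", count);
  return 0;
}
#endif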
2325 static int OnExit(ThreadState *thr) {
2326 int status = Finalize(thr);
2327 FlushStreams();
2328 return status;
2331 struct TsanInterceptorContext {
2332 ThreadState *thr;
2333 const uptr pc;
2336 #if !SANITIZER_APPLE
2337 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2338 __sanitizer_msghdr *msg) {
2339 int fds[64];
2340 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2341 for (int i = 0; i < cnt; i++)
2342 FdEventCreate(thr, pc, fds[i]);
2344 #endif
2346 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2347 // Causes interceptor recursion (getaddrinfo() and fopen())
2348 #undef SANITIZER_INTERCEPT_GETADDRINFO
2349 // We define our own.
2350 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2351 #define NEED_TLS_GET_ADDR
2352 #endif
2353 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2354 #define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
2355 #undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2357 #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
2358 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
2359 INTERCEPT_FUNCTION_VER(name, ver)
2360 #define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
2361 (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
2363 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
2364 MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
2365 ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
2366 true)
2368 #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
2369 MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
2370 ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
2371 false)
2373 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
2374 SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
2375 TsanInterceptorContext _ctx = {thr, pc}; \
2376 ctx = (void *)&_ctx; \
2377 (void)ctx;
2379 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2380 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
2381 TsanInterceptorContext _ctx = {thr, pc}; \
2382 ctx = (void *)&_ctx; \
2383 (void)ctx;
2385 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2386 if (path) \
2387 Acquire(thr, pc, File2addr(path)); \
2388 if (file) { \
2389 int fd = fileno_unlocked(file); \
2390 if (fd >= 0) FdFileCreate(thr, pc, fd); \
2393 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2394 if (file) { \
2395 int fd = fileno_unlocked(file); \
2396 FdClose(thr, pc, fd); \
2399 #define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
2400 ({ \
2401 CheckNoDeepBind(filename, flag); \
2402 ThreadIgnoreBegin(thr, 0); \
2403 void *res = REAL(dlopen)(filename, flag); \
2404 ThreadIgnoreEnd(thr); \
2405 res; \
2408 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2409 libignore()->OnLibraryLoaded(filename)
2411 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2412 libignore()->OnLibraryUnloaded()
2414 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2415 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2417 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2418 Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2420 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2421 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2423 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2424 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2426 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2427 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2429 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2430 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2432 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2433 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2435 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2436 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2438 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2439 if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
2440 COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name); \
2441 else \
2442 __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
2444 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2446 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2447 OnExit(((TsanInterceptorContext *) ctx)->thr)
2449 #define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
2450 MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
2451 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2453 #define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
2454 MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
2455 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2457 #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
2458 MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
2459 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2461 #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
2462 MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
2463 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2465 #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
2466 MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
2467 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2469 #define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
2470 off) \
2471 do { \
2472 return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2473 off); \
2474 } while (false)
2476 #if !SANITIZER_APPLE
2477 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2478 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2479 ((TsanInterceptorContext *)ctx)->pc, msg)
2480 #endif
2482 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
2483 if (TsanThread *t = GetCurrentThread()) { \
2484 *begin = t->tls_begin(); \
2485 *end = t->tls_end(); \
2486 } else { \
2487 *begin = *end = 0; \
2490 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2491 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2493 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2494 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2496 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2498 static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2499 __sanitizer_sigaction *old);
2500 static __sanitizer_sighandler_ptr signal_impl(int sig,
2501 __sanitizer_sighandler_ptr h);
2503 #define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2504 { return sigaction_impl(signo, act, oldact); }
2506 #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2507 { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2509 #include "sanitizer_common/sanitizer_signal_interceptors.inc"
2511 int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2512 __sanitizer_sigaction *old) {
2513 // Note: if we call REAL(sigaction) directly for any reason without proxying
2514 // the signal handler through sighandler, very bad things will happen.
2515 // The handler will run synchronously and corrupt tsan per-thread state.
2516 SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2517 if (sig <= 0 || sig >= kSigCount) {
2518 errno = errno_EINVAL;
2519 return -1;
2521 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2522 __sanitizer_sigaction old_stored;
2523 if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2524 __sanitizer_sigaction newact;
2525 if (act) {
2526 // Copy act into sigactions[sig].
2527 // Can't use a struct copy, because the compiler can emit a call to memcpy.
2528 // Can't use internal_memcpy, because it copies byte-by-byte,
2529 // and the signal handler reads the handler concurrently, so it can read
2530 // some bytes from the old value and some bytes from the new value.
2531 // Use volatile to prevent insertion of memcpy.
2532 sigactions[sig].handler =
2533 *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2534 sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2535 internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2536 sizeof(sigactions[sig].sa_mask));
2537 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
2538 sigactions[sig].sa_restorer = act->sa_restorer;
2539 #endif
2540 internal_memcpy(&newact, act, sizeof(newact));
2541 internal_sigfillset(&newact.sa_mask);
2542 if ((act->sa_flags & SA_SIGINFO) ||
2543 ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
2544 newact.sa_flags |= SA_SIGINFO;
2545 newact.sigaction = sighandler;
2547 ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2548 act = &newact;
2550 int res = REAL(sigaction)(sig, act, old);
2551 if (res == 0 && old && old->sigaction == sighandler)
2552 internal_memcpy(old, &old_stored, sizeof(*old));
2553 return res;
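// A standalone sketch of the field-wise volatile copy used above (hypothetical
// names, not part of this file): reading the source through volatile lvalues
// keeps the compiler from fusing the copies into a memcpy call, so a concurrent
// reader of the slot sees either the whole old pointer or the whole new one,
// relying on naturally aligned pointer-sized stores being single stores on the
// targets tsan supports.
#if 0  // illustrative sketch only
typedef void (*handler_fn)(int);

struct HandlerSlot {
  handler_fn handler;
  int flags;
};

static HandlerSlot g_slot;  // read concurrently from a signal handler wrapper

static void publish(const HandlerSlot *src) {
  // Field-by-field, each through a volatile read of the source; no memcpy.
  g_slot.handler = *(handler_fn const volatile *)&src->handler;
  g_slot.flags = *(int const volatile *)&src->flags;
}
#endif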
2556 static __sanitizer_sighandler_ptr signal_impl(int sig,
2557 __sanitizer_sighandler_ptr h) {
2558 __sanitizer_sigaction act;
2559 act.handler = h;
2560 internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2561 act.sa_flags = 0;
2562 __sanitizer_sigaction old;
2563 int res = sigaction_symname(sig, &act, &old);
2564 if (res) return (__sanitizer_sighandler_ptr)sig_err;
2565 return old.handler;
2568 #define TSAN_SYSCALL() \
2569 ThreadState *thr = cur_thread(); \
2570 if (thr->ignore_interceptors) \
2571 return; \
2572 ScopedSyscall scoped_syscall(thr)
2574 struct ScopedSyscall {
2575 ThreadState *thr;
2577 explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
2579 ~ScopedSyscall() {
2580 ProcessPendingSignals(thr);
2584 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE
2585 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2586 TSAN_SYSCALL();
2587 MemoryAccessRange(thr, pc, p, s, write);
2590 static USED void syscall_acquire(uptr pc, uptr addr) {
2591 TSAN_SYSCALL();
2592 Acquire(thr, pc, addr);
2593 DPrintf("syscall_acquire(0x%zx)\n", addr);
2596 static USED void syscall_release(uptr pc, uptr addr) {
2597 TSAN_SYSCALL();
2598 DPrintf("syscall_release(0x%zx)\n", addr);
2599 Release(thr, pc, addr);
2602 static void syscall_fd_close(uptr pc, int fd) {
2603 auto *thr = cur_thread();
2604 FdClose(thr, pc, fd);
2607 static USED void syscall_fd_acquire(uptr pc, int fd) {
2608 TSAN_SYSCALL();
2609 FdAcquire(thr, pc, fd);
2610 DPrintf("syscall_fd_acquire(%d)\n", fd);
2613 static USED void syscall_fd_release(uptr pc, int fd) {
2614 TSAN_SYSCALL();
2615 DPrintf("syscall_fd_release(%d)\n", fd);
2616 FdRelease(thr, pc, fd);
2619 static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
2621 static void syscall_post_fork(uptr pc, int pid) {
2622 ThreadState *thr = cur_thread();
2623 if (pid == 0) {
2624 // child
2625 ForkChildAfter(thr, pc, true);
2626 FdOnFork(thr, pc);
2627 } else if (pid > 0) {
2628 // parent
2629 ForkParentAfter(thr, pc);
2630 } else {
2631 // error
2632 ForkParentAfter(thr, pc);
2635 #endif
2637 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2638 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2640 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2641 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2643 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2644 do { \
2645 (void)(p); \
2646 (void)(s); \
2647 } while (false)
2649 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2650 do { \
2651 (void)(p); \
2652 (void)(s); \
2653 } while (false)
2655 #define COMMON_SYSCALL_ACQUIRE(addr) \
2656 syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2658 #define COMMON_SYSCALL_RELEASE(addr) \
2659 syscall_release(GET_CALLER_PC(), (uptr)(addr))
2661 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2663 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2665 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2667 #define COMMON_SYSCALL_PRE_FORK() \
2668 syscall_pre_fork(GET_CALLER_PC())
2670 #define COMMON_SYSCALL_POST_FORK(res) \
2671 syscall_post_fork(GET_CALLER_PC(), res)
2673 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2674 #include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2676 #ifdef NEED_TLS_GET_ADDR
2678 static void handle_tls_addr(void *arg, void *res) {
2679 ThreadState *thr = cur_thread();
2680 if (!thr)
2681 return;
2682 DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2683 thr->tls_addr + thr->tls_size);
2684 if (!dtv)
2685 return;
2686 // New DTLS block has been allocated.
2687 MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2690 #if !SANITIZER_S390
2691 // Define our own interceptor instead of sanitizer_common's for three reasons:
2692 // 1. It must not process pending signals.
2693 //    Signal handlers may contain the MOVDQA instruction (see below).
2694 // 2. It must be as simple as possible so as not to contain MOVDQA itself.
2695 // 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE,
2696 //    which is empty for tsan (it is meant only for msan).
2697 // Note: __tls_get_addr can be called with a mis-aligned stack due to:
2698 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2699 // So the interceptor must work with a mis-aligned stack; in particular, it must
2700 // not execute MOVDQA with stack addresses.
2701 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2702 void *res = REAL(__tls_get_addr)(arg);
2703 handle_tls_addr(arg, res);
2704 return res;
2706 #else // SANITIZER_S390
2707 TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
2708 uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
2709 char *tp = static_cast<char *>(__builtin_thread_pointer());
2710 handle_tls_addr(arg, res + tp);
2711 return res;
2713 #endif
2714 #endif
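// User code that takes this path (hypothetical sketch, compiled separately,
// not part of this file): the first access to a dlopen'ed module's __thread
// variable from a new thread typically goes through __tls_get_addr
// (general-dynamic TLS model), which may allocate a fresh DTLS block;
// handle_tls_addr above then clears its shadow so reuse of that memory does
// not produce stale reports.
#if 0  // illustrative sketch only
// plugin.cpp, built separately as ./plugin.so:
//   __thread int tls_counter;
//   extern "C" int bump() { return ++tls_counter; }

#include <dlfcn.h>
#include <pthread.h>
#include <cstdio>

static void *worker(void *handle) {
  auto bump = (int (*)())dlsym(handle, "bump");
  std::printf("tls_counter in worker: %d\n", bump());  // first DTLS access here
  return nullptr;
}

int main() {
  void *handle = dlopen("./plugin.so", RTLD_NOW);
  if (!handle)
    return 1;
  pthread_t t;
  pthread_create(&t, nullptr, worker, handle);
  pthread_join(t, nullptr);
  dlclose(handle);
  return 0;
}
#endif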
2716 #if SANITIZER_NETBSD
2717 TSAN_INTERCEPTOR(void, _lwp_exit) {
2718 SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2719 DestroyThreadState();
2720 REAL(_lwp_exit)();
2722 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2723 #else
2724 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2725 #endif
2727 #if SANITIZER_FREEBSD
2728 TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2729 SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2730 DestroyThreadState();
2731 REAL(thr_exit)(state);
2733 #define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2734 #else
2735 #define TSAN_MAYBE_INTERCEPT_THR_EXIT
2736 #endif
2738 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
2739 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
2740 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
2741 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
2742 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
2743 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
2744 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
2745 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
2746 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
2747 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
2748 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
2749 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
2750 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
2751 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
2752 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
2753 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
2754 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
2755 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
2756 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)
2758 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2759 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2760 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2761 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2762 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2763 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2764 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2765 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2766 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2767 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2768 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2769 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2770 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2771 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2772 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2773 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2774 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
2775 void *c)
2777 namespace __tsan {
2779 static void finalize(void *arg) {
2780 ThreadState *thr = cur_thread();
2781 int status = Finalize(thr);
2782 // Make sure the output is not lost.
2783 FlushStreams();
2784 if (status)
2785 Die();
2788 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2789 static void unreachable() {
2790 Report("FATAL: ThreadSanitizer: unreachable called\n");
2791 Die();
2793 #endif
2795 // Define default implementation since interception of libdispatch is optional.
2796 SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
2798 void InitializeInterceptors() {
2799 #if !SANITIZER_APPLE
2800 // We need to set this up early, because functions like dlsym() can call it.
2801 REAL(memset) = internal_memset;
2802 REAL(memcpy) = internal_memcpy;
2803 #endif
2805 new(interceptor_ctx()) InterceptorContext();
2807 InitializeCommonInterceptors();
2808 InitializeSignalInterceptors();
2809 InitializeLibdispatchInterceptors();
2811 #if !SANITIZER_APPLE
2812 // We cannot use TSAN_INTERCEPT to get the setjmp address,
2813 // because it does &setjmp and setjmp is not present in some versions of libc.
2814 using __interception::InterceptFunction;
2815 InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
2816 InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
2817 InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
2819 #if !SANITIZER_NETBSD
2820 InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
2821 #endif
2822 #endif
2824 TSAN_INTERCEPT(longjmp_symname);
2825 TSAN_INTERCEPT(siglongjmp_symname);
2826 #if SANITIZER_NETBSD
2827 TSAN_INTERCEPT(_longjmp);
2828 #endif
2830 TSAN_INTERCEPT(malloc);
2831 TSAN_INTERCEPT(__libc_memalign);
2832 TSAN_INTERCEPT(calloc);
2833 TSAN_INTERCEPT(realloc);
2834 TSAN_INTERCEPT(reallocarray);
2835 TSAN_INTERCEPT(free);
2836 TSAN_INTERCEPT(cfree);
2837 TSAN_INTERCEPT(munmap);
2838 TSAN_MAYBE_INTERCEPT_MEMALIGN;
2839 TSAN_INTERCEPT(valloc);
2840 TSAN_MAYBE_INTERCEPT_PVALLOC;
2841 TSAN_INTERCEPT(posix_memalign);
2843 TSAN_INTERCEPT(strcpy);
2844 TSAN_INTERCEPT(strncpy);
2845 TSAN_INTERCEPT(strdup);
2847 TSAN_INTERCEPT(pthread_create);
2848 TSAN_INTERCEPT(pthread_join);
2849 TSAN_INTERCEPT(pthread_detach);
2850 TSAN_INTERCEPT(pthread_exit);
2851 #if SANITIZER_LINUX
2852 TSAN_INTERCEPT(pthread_tryjoin_np);
2853 TSAN_INTERCEPT(pthread_timedjoin_np);
2854 #endif
2856 TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2857 TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2858 TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2859 TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2860 TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2861 TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2863 TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
2865 TSAN_INTERCEPT(pthread_mutex_init);
2866 TSAN_INTERCEPT(pthread_mutex_destroy);
2867 TSAN_INTERCEPT(pthread_mutex_trylock);
2868 TSAN_INTERCEPT(pthread_mutex_timedlock);
2870 TSAN_INTERCEPT(pthread_spin_init);
2871 TSAN_INTERCEPT(pthread_spin_destroy);
2872 TSAN_INTERCEPT(pthread_spin_lock);
2873 TSAN_INTERCEPT(pthread_spin_trylock);
2874 TSAN_INTERCEPT(pthread_spin_unlock);
2876 TSAN_INTERCEPT(pthread_rwlock_init);
2877 TSAN_INTERCEPT(pthread_rwlock_destroy);
2878 TSAN_INTERCEPT(pthread_rwlock_rdlock);
2879 TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2880 TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2881 TSAN_INTERCEPT(pthread_rwlock_wrlock);
2882 TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2883 TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2884 TSAN_INTERCEPT(pthread_rwlock_unlock);
2886 TSAN_INTERCEPT(pthread_barrier_init);
2887 TSAN_INTERCEPT(pthread_barrier_destroy);
2888 TSAN_INTERCEPT(pthread_barrier_wait);
2890 TSAN_INTERCEPT(pthread_once);
2892 TSAN_INTERCEPT(fstat);
2893 TSAN_MAYBE_INTERCEPT___FXSTAT;
2894 TSAN_MAYBE_INTERCEPT_FSTAT64;
2895 TSAN_MAYBE_INTERCEPT___FXSTAT64;
2896 TSAN_INTERCEPT(open);
2897 TSAN_MAYBE_INTERCEPT_OPEN64;
2898 TSAN_INTERCEPT(creat);
2899 TSAN_MAYBE_INTERCEPT_CREAT64;
2900 TSAN_INTERCEPT(dup);
2901 TSAN_INTERCEPT(dup2);
2902 TSAN_INTERCEPT(dup3);
2903 TSAN_MAYBE_INTERCEPT_EVENTFD;
2904 TSAN_MAYBE_INTERCEPT_SIGNALFD;
2905 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2906 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2907 TSAN_INTERCEPT(socket);
2908 TSAN_INTERCEPT(socketpair);
2909 TSAN_INTERCEPT(connect);
2910 TSAN_INTERCEPT(bind);
2911 TSAN_INTERCEPT(listen);
2912 TSAN_MAYBE_INTERCEPT_EPOLL;
2913 TSAN_INTERCEPT(close);
2914 TSAN_MAYBE_INTERCEPT___CLOSE;
2915 TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2916 TSAN_INTERCEPT(pipe);
2917 TSAN_INTERCEPT(pipe2);
2919 TSAN_INTERCEPT(unlink);
2920 TSAN_INTERCEPT(tmpfile);
2921 TSAN_MAYBE_INTERCEPT_TMPFILE64;
2922 TSAN_INTERCEPT(abort);
2923 TSAN_INTERCEPT(rmdir);
2924 TSAN_INTERCEPT(closedir);
2926 TSAN_INTERCEPT(sigsuspend);
2927 TSAN_INTERCEPT(sigblock);
2928 TSAN_INTERCEPT(sigsetmask);
2929 TSAN_INTERCEPT(pthread_sigmask);
2930 TSAN_INTERCEPT(raise);
2931 TSAN_INTERCEPT(kill);
2932 TSAN_INTERCEPT(pthread_kill);
2933 TSAN_INTERCEPT(sleep);
2934 TSAN_INTERCEPT(usleep);
2935 TSAN_INTERCEPT(nanosleep);
2936 TSAN_INTERCEPT(pause);
2937 TSAN_INTERCEPT(gettimeofday);
2938 TSAN_INTERCEPT(getaddrinfo);
2940 TSAN_INTERCEPT(fork);
2941 TSAN_INTERCEPT(vfork);
2942 #if SANITIZER_LINUX
2943 TSAN_INTERCEPT(clone);
2944 #endif
2945 #if !SANITIZER_ANDROID
2946 TSAN_INTERCEPT(dl_iterate_phdr);
2947 #endif
2948 TSAN_MAYBE_INTERCEPT_ON_EXIT;
2949 TSAN_INTERCEPT(__cxa_atexit);
2950 TSAN_INTERCEPT(_exit);
2952 #ifdef NEED_TLS_GET_ADDR
2953 #if !SANITIZER_S390
2954 TSAN_INTERCEPT(__tls_get_addr);
2955 #else
2956 TSAN_INTERCEPT(__tls_get_addr_internal);
2957 TSAN_INTERCEPT(__tls_get_offset);
2958 #endif
2959 #endif
2961 TSAN_MAYBE_INTERCEPT__LWP_EXIT;
2962 TSAN_MAYBE_INTERCEPT_THR_EXIT;
2964 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2965 // We need to set it up, because interceptors check that the function is resolved.
2966 // But atexit is emitted directly into the module, so can't be resolved.
2967 REAL(atexit) = (int(*)(void(*)()))unreachable;
2968 #endif
2970 if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
2971 Printf("ThreadSanitizer: failed to setup atexit callback\n");
2972 Die();
2974 if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
2975 Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
2976 Die();
2979 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
2980 if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
2981 Printf("ThreadSanitizer: failed to create thread key\n");
2982 Die();
2984 #endif
2986 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
2987 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
2988 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
2989 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
2990 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
2991 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
2992 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
2993 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
2994 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
2995 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
2996 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
2997 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
2998 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
2999 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
3000 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
3001 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
3002 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
3003 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
3004 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);
3006 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
3007 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
3008 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
3009 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
3010 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
3011 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
3012 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
3013 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
3014 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
3015 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
3016 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
3017 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
3018 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
3019 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
3020 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
3021 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
3022 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
3024 FdInit();
3027 } // namespace __tsan
3029 // Invisible barrier for tests.
3030 // There were several unsuccessful iterations for this functionality:
3031 // 1. Initially it was implemented in user code using
3032 // REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
3033 // MacOS, and futexes are Linux-specific, so they do not help here either.
3034 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
3035 // "as-if synchronized via sleep" messages in reports which failed some
3036 // output tests.
3037 // 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
3038 // visible events, which led to "failed to restore stack trace" failures.
3039 // Note that the no_sanitize_thread attribute does not turn off atomic interception,
3040 // so attaching it to a function defined in user code does not help.
3041 // That's why we now have what we have.
3042 constexpr u32 kBarrierThreadBits = 10;
3043 constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
3045 extern "C" {
3047 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
3048 atomic_uint32_t *barrier, u32 num_threads) {
3049 if (num_threads >= kBarrierThreads) {
3050 Printf("barrier_init: count is too large (%d)\n", num_threads);
3051 Die();
3053 // The low kBarrierThreadBits bits hold the thread count,
3054 // the remaining bits count the threads that have entered.
3055 atomic_store(barrier, num_threads, memory_order_relaxed);
3058 static u32 barrier_epoch(u32 value) {
3059 return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
3062 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
3063 atomic_uint32_t *barrier) {
3064 u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
3065 u32 old_epoch = barrier_epoch(old);
3066 if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
3067 FutexWake(barrier, (1 << 30));
3068 return;
3070 for (;;) {
3071 u32 cur = atomic_load(barrier, memory_order_relaxed);
3072 if (barrier_epoch(cur) != old_epoch)
3073 return;
3074 FutexWait(barrier, cur);
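// A worked example of the packing described above, with kBarrierThreadBits ==
// 10 (so kBarrierThreads == 1024) and 4 participating threads:
//   __tsan_testonly_barrier_init(b, 4)  -> value = 4     epoch = 0/4 = 0
//   1st wait: fetch_add(1024), old = 4  -> value = 1028  epoch = 1/4 = 0, waits
//   2nd wait:             old = 1028    -> value = 2052  epoch = 2/4 = 0, waits
//   3rd wait:             old = 2052    -> value = 3076  epoch = 3/4 = 0, waits
//   4th wait:             old = 3076    -> value = 4100  epoch = 4/4 = 1, wakes
// The last arrival changes the value returned by barrier_epoch(), so every
// thread spinning in the FutexWait() loop observes the new epoch and returns.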
3078 void *__tsan_memcpy(void *dst, const void *src, uptr size) {
3079 void *ctx;
3080 #if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
3081 COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
3082 #else
3083 COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
3084 #endif
3087 void *__tsan_memset(void *dst, int c, uptr size) {
3088 void *ctx;
3089 COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, c, size);
3092 void *__tsan_memmove(void *dst, const void *src, uptr size) {
3093 void *ctx;
3094 COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);