//===-- tsan_platform_linux.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Linux- and BSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include <sys/personality.h>
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <pthread.h>
#include <setjmp.h>
#define __need_res_state
#include <resolv.h>
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64)) && \
    !SANITIZER_GO
# define INIT_LONGJMP_XOR_KEY 1
#else
# define INIT_LONGJMP_XOR_KEY 0
#endif
#if INIT_LONGJMP_XOR_KEY
#include "interception/interception.h"
// Must be declared outside of other namespaces.
DECLARE_REAL(int, _setjmp, void *env)
#endif

namespace __tsan {
#if INIT_LONGJMP_XOR_KEY
static void InitializeLongjmpXorKey();
static uptr longjmp_xor_key;
#endif

// Runtime detected VMA size.
uptr vmaSize;

enum {
  MemTotal,
  MemShadow,
  MemMeta,
  MemFile,
  MemMmap,
  MemHeap,
  MemOther,
  MemCount,
};
void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem) {
  mem[MemTotal] += rss;
  if (p >= ShadowBeg() && p < ShadowEnd())
    mem[MemShadow] += rss;
  else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
    mem[MemMeta] += rss;
  else if ((p >= LoAppMemBeg() && p < LoAppMemEnd()) ||
           (p >= MidAppMemBeg() && p < MidAppMemEnd()) ||
           (p >= HiAppMemBeg() && p < HiAppMemEnd()))
    mem[file ? MemFile : MemMmap] += rss;
  else if (p >= HeapMemBeg() && p < HeapMemEnd())
    mem[MemHeap] += rss;
  else
    mem[MemOther] += rss;
}
void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
  uptr mem[MemCount];
  internal_memset(mem, 0, sizeof(mem));
  GetMemoryProfile(FillProfileCallback, mem);
  auto meta = ctx->metamap.GetMemoryStats();
  StackDepotStats stacks = StackDepotGetStats();
  uptr nthread = 0, nlive = 0;
  ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
  uptr trace_mem;
  {
    Lock l(&ctx->slot_mtx);
    trace_mem = ctx->trace_part_total_allocated * sizeof(TracePart);
  }
  uptr internal_stats[AllocatorStatCount];
  internal_allocator()->GetStats(internal_stats);
  // All these are allocated from the common mmap region.
  mem[MemMmap] -= meta.mem_block + meta.sync_obj + trace_mem +
                  stacks.allocated + internal_stats[AllocatorStatMapped];
  if (s64(mem[MemMmap]) < 0)
    mem[MemMmap] = 0;
  internal_snprintf(
      buf, buf_size,
      "==%zu== %llus [%zu]: RSS %zd MB: shadow:%zd meta:%zd file:%zd"
      " mmap:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
      " trace:%zu stacks=%zd threads=%zu/%zu\n",
      internal_getpid(), uptime_ns / (1000 * 1000 * 1000), ctx->global_epoch,
      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemHeap] >> 20,
      mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
      meta.mem_block >> 20, meta.sync_obj >> 20, trace_mem >> 20,
      stacks.allocated >> 20, nlive, nthread);
}
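
// For reference, a single profile line produced above looks roughly like this
// (all values are illustrative, not measured):
//   ==12345== 7s [2]: RSS 1024 MB: shadow:512 meta:64 file:128 mmap:96 heap:64
//   other:16 intalloc:8 memblocks:4 syncobj:2 trace:32 stacks=16 threads=9/64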
#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special Shadow::kRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
  // First create temp file.
  const char *tmpdir = GetEnv("TMPDIR");
  if (tmpdir == 0)
    tmpdir = GetEnv("TEST_TMPDIR");
  if (tmpdir == 0)
    return;
  char name[256];
  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
                    tmpdir, (int)internal_getpid());
  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
    return;
  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
  fd_t fd = openrv;
  // Fill the file with Shadow::kRodata.
  const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
  InternalMmapVector<RawShadow> marker(kMarkerSize);
  // volatile to prevent insertion of memset
  for (volatile RawShadow *p = marker.data(); p < marker.data() + kMarkerSize;
       p++)
    *p = Shadow::kRodata;
  internal_write(fd, marker.data(), marker.size() * sizeof(RawShadow));
  // Map the file into memory.
  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
  if (internal_iserror(page)) {
    internal_close(fd);
    return;
  }
  // Map the file into shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  // Reusing the buffer 'name'.
  MemoryMappedSegment segment(name, ARRAY_SIZE(name));
  while (proc_maps.Next(&segment)) {
    if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
        segment.IsReadable() && segment.IsExecutable() &&
        !segment.IsWritable() && IsAppMem(segment.start)) {
      // Assume it's .rodata
      char *shadow_start = (char *)MemToShadow(segment.start);
      char *shadow_end = (char *)MemToShadow(segment.end);
      for (char *p = shadow_start; p < shadow_end;
           p += marker.size() * sizeof(RawShadow)) {
        internal_mmap(
            p, Min<uptr>(marker.size() * sizeof(RawShadow), shadow_end - p),
            PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      }
    }
  }
  internal_close(fd);
}
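
// MapRodata() backs the shadow of all .rodata ranges with the same small set
// of file pages pre-filled with Shadow::kRodata, so the shadow of read-only
// data costs roughly the 512KB marker in physical memory, and accesses to
// .rodata are recognized by the marker instead of being tracked individually.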
void InitializeShadowMemoryPlatform() {
  MapRodata();
}
#endif  // #if !SANITIZER_GO
void InitializePlatformEarly() {
  vmaSize =
    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
# if !SANITIZER_GO
  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 48\n", vmaSize);
    Die();
  }
# endif
#elif SANITIZER_LOONGARCH64
  if (vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
    Die();
  }
#elif defined(__powerpc64__)
# if !SANITIZER_GO
  if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 44, 46, and 47\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 46, and 47\n", vmaSize);
    Die();
  }
# endif
#elif defined(__mips64)
# if !SANITIZER_GO
  if (vmaSize != 40) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 40\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
    Die();
  }
# endif
#endif
}
void InitializePlatform() {
  DisableCoreDumperIfNecessary();

  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem as well, because the executable
  // is not compiled with -pie.
#if !SANITIZER_GO
  bool reexec = false;
  // TSan doesn't play well with unlimited stack size (as stack
  // overlaps with shadow memory). If we detect unlimited stack size,
  // we re-exec the program with limited stack size as a best effort.
  if (StackSizeIsUnlimited()) {
    const uptr kMaxStackSize = 32 * 1024 * 1024;
    VReport(1, "Program is run with unlimited stack size, which wouldn't "
               "work with ThreadSanitizer.\n"
               "Re-execing with stack size limited to %zd bytes.\n",
            kMaxStackSize);
    SetStackSizeLimitInBytes(kMaxStackSize);
    reexec = true;
  }

  if (!AddressSpaceIsUnlimited()) {
    Report("WARNING: Program is run with limited virtual address space,"
           " which wouldn't work with ThreadSanitizer.\n");
    Report("Re-execing with unlimited virtual address space.\n");
    SetAddressSpaceUnlimited();
    reexec = true;
  }
#if SANITIZER_ANDROID && (defined(__aarch64__) || defined(__x86_64__))
  // After patch "arm64: mm: support ARCH_MMAP_RND_BITS." is introduced in
  // linux kernel, the random gap between stack and mapped area is increased
  // from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover
  // this big range, we should disable randomized virtual space on aarch64.
  // ASLR personality check.
  int old_personality = personality(0xffffffff);
  if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
    VReport(1, "WARNING: Program is run with randomized virtual address "
               "space, which wouldn't work with ThreadSanitizer.\n"
               "Re-execing with fixed virtual address space.\n");
    CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
    reexec = true;
  }
#endif
  if (reexec)
    ReExec();

#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64))
  // Initialize the xor key used in {sig}{set,long}jump.
  InitializeLongjmpXorKey();
#endif
#endif  // !SANITIZER_GO
}
#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
  int cnt = 0;
  struct __res_state *statp = (struct __res_state *)state;
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  return 0;
#endif
}
// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds.
// see 'man recvmsg' and 'man 3 cmsg'.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int res = 0;
  msghdr *msg = (msghdr *)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int *)CMSG_DATA(cmsg))[i];
      if (res == nfd)
        return res;
    }
  }
  return res;
}
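
// For context, the sending side of such an SCM_RIGHTS transfer (outside of
// TSan; names here are purely illustrative) typically fills the control
// buffer like this, which is exactly the layout the loop above walks:
//   char cbuf[CMSG_SPACE(sizeof(int))];
//   msghdr msg = {};
//   msg.msg_control = cbuf;
//   msg.msg_controllen = sizeof(cbuf);
//   cmsghdr *c = CMSG_FIRSTHDR(&msg);
//   c->cmsg_level = SOL_SOCKET;
//   c->cmsg_type = SCM_RIGHTS;
//   c->cmsg_len = CMSG_LEN(sizeof(int));
//   memcpy(CMSG_DATA(c), &fd, sizeof(int));
//   sendmsg(sock, &msg, 0);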
// Reverse operation of libc stack pointer mangling
static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#if defined(__x86_64__)
  // Reverse of:
  //     xor  %fs:0x30, %rsi
  //     rol  $0x11,    %rsi
  uptr sp;
  asm("ror $0x11, %0 \n"
      "xor %%fs:0x30, %0 \n"
      : "=r" (sp)
      : "0" (mangled_sp));
  return sp;
#elif defined(__aarch64__)
# if SANITIZER_LINUX
  return mangled_sp ^ longjmp_xor_key;
# else
  return mangled_sp;
# endif
#elif defined(__loongarch_lp64)
  return mangled_sp ^ longjmp_xor_key;
#elif defined(__powerpc64__)
  // Reverse of:
  //   ld   r4, -28696(r13)
  //   xor  r4, r3, r4
  uptr xor_key;
  asm("ld %0, -28696(%%r13)" : "=r" (xor_key));
  return mangled_sp ^ xor_key;
#elif defined(__mips__)
  return mangled_sp;
#elif defined(__s390x__)
  // tcbhead_t.stack_guard
  uptr xor_key = ((uptr *)__builtin_thread_pointer())[5];
  return mangled_sp ^ xor_key;
#else
  #error "Unknown platform"
#endif
}
#if SANITIZER_NETBSD
# define LONG_JMP_SP_ENV_SLOT 6
#elif defined(__powerpc__)
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
# ifdef __aarch64__
#  define LONG_JMP_SP_ENV_SLOT 1
# else
#  define LONG_JMP_SP_ENV_SLOT 2
# endif
#elif SANITIZER_LINUX
# ifdef __aarch64__
#  define LONG_JMP_SP_ENV_SLOT 13
# elif defined(__loongarch__)
#  define LONG_JMP_SP_ENV_SLOT 1
# elif defined(__mips64)
#  define LONG_JMP_SP_ENV_SLOT 1
# elif defined(__s390x__)
#  define LONG_JMP_SP_ENV_SLOT 9
# else
#  define LONG_JMP_SP_ENV_SLOT 6
# endif
#endif
uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  return UnmangleLongJmpSp(mangled_sp);
}
#if INIT_LONGJMP_XOR_KEY
// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
// functions) by XORing them with a random key. For AArch64 it is a global
// variable rather than a TCB one (as for x86_64/powerpc). We obtain the key by
// issuing a setjmp and XORing the SP pointer values to derive the key.
static void InitializeLongjmpXorKey() {
  // 1. Call REAL(setjmp), which stores the mangled SP in env.
  jmp_buf env;
  REAL(_setjmp)(env);

  // 2. Retrieve vanilla/mangled SP.
  uptr sp;
#ifdef __loongarch__
  asm("move %0, $sp" : "=r" (sp));
#else
  asm("mov %0, sp" : "=r" (sp));
#endif
  uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];

  // 3. xor SPs to obtain key.
  longjmp_xor_key = mangled_sp ^ sp;
}
#endif
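
// The derivation above relies on the key being applied as a plain XOR with no
// rotation on these targets (which is also what the __aarch64__ branch of
// UnmangleLongJmpSp assumes), so (sp ^ key) ^ sp == key.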
extern "C" void __tsan_tls_initialization() {}

void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  // Check that the thr object is in tls;
  const uptr thr_beg = (uptr)thr;
  const uptr thr_end = (uptr)thr + sizeof(*thr);
  CHECK_GE(thr_beg, tls_addr);
  CHECK_LE(thr_beg, tls_addr + tls_size);
  CHECK_GE(thr_end, tls_addr);
  CHECK_LE(thr_end, tls_addr + tls_size);
  // Since the thr object is huge, skip it.
  const uptr pc = StackTrace::GetNextInstructionPc(
      reinterpret_cast<uptr>(__tsan_tls_initialization));
  MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr);
  MemoryRangeImitateWrite(thr, pc, thr_end, tls_addr + tls_size - thr_end);
}
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
                                     void (*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(arg);
  pthread_cleanup_pop(0);
  return res;
}
#endif  // !SANITIZER_GO
void ReplaceSystemMalloc() { }
#if !SANITIZER_GO
#if SANITIZER_ANDROID
// On Android, one thread can call intercepted functions after
// DestroyThreadState(), so add a fake thread state for "dead" threads.
static ThreadState *dead_thread_state = nullptr;
ThreadState *cur_thread() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(*get_android_tls_ptr());
  if (thr == nullptr) {
    __sanitizer_sigset_t emptyset;
    internal_sigfillset(&emptyset);
    __sanitizer_sigset_t oldset;
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
    thr = reinterpret_cast<ThreadState *>(*get_android_tls_ptr());
    if (thr == nullptr) {
      thr = reinterpret_cast<ThreadState *>(
          MmapOrDie(sizeof(ThreadState), "ThreadState"));
      *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
      if (dead_thread_state == nullptr) {
        dead_thread_state = reinterpret_cast<ThreadState *>(
            MmapOrDie(sizeof(ThreadState), "ThreadState"));
        dead_thread_state->fast_state.SetIgnoreBit();
        dead_thread_state->ignore_interceptors = 1;
        dead_thread_state->is_dead = true;
        *const_cast<u32 *>(&dead_thread_state->tid) = -1;
        // Make the fake state read-only after initialization.
        CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
                                      PROT_READ));
      }
    }
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
  }
  return thr;
}
void set_cur_thread(ThreadState *thr) {
  *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
}
void cur_thread_finalize() {
  __sanitizer_sigset_t emptyset;
  internal_sigfillset(&emptyset);
  __sanitizer_sigset_t oldset;
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
  ThreadState *thr = reinterpret_cast<ThreadState *>(*get_android_tls_ptr());
  if (thr != dead_thread_state) {
    *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state);
    UnmapOrDie(thr, sizeof(ThreadState));
  }
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
#endif  // SANITIZER_ANDROID
#endif  // if !SANITIZER_GO

}  // namespace __tsan

#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD