//===-- tsan_platform_linux.cc --------------------------------------------===//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
// This file is a part of ThreadSanitizer (TSan), a race detector.
// Linux-specific code.
//===----------------------------------------------------------------------===//
14 #include "sanitizer_common/sanitizer_platform.h"
15 #if SANITIZER_LINUX || SANITIZER_FREEBSD
17 #include "sanitizer_common/sanitizer_common.h"
18 #include "sanitizer_common/sanitizer_libc.h"
19 #include "sanitizer_common/sanitizer_procmaps.h"
20 #include "sanitizer_common/sanitizer_stoptheworld.h"
21 #include "tsan_platform.h"
23 #include "tsan_flags.h"
33 #include <sys/syscall.h>
34 #include <sys/socket.h>
36 #include <sys/types.h>
37 #include <sys/resource.h>
44 #define __need_res_state
// Glibc-internal symbol pointing at the top of the main thread's stack.
// Declared here (and defined below, initialized to 0) so the runtime links
// even when the C library does not export it.
// NOTE(review): upstream guards the definition with a build-mode #if —
// confirm whether the guard was dropped by this chunking.
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
// Assumed OS page size in bytes, used as the mmap length for the rodata
// probe mapping below.
// NOTE(review): hard-coded 4K — confirm for platforms with larger pages.
const uptr kPageSize = 4096;
// Memory-profile callback: attributes the RSS of one address-space region to
// a bucket in mem[] (MemShadow / MemFile / MemMmap / ...), keyed on `start`.
// NOTE(review): this chunk is truncated — the opening dispatch `if`, several
// branch bodies, the remaining buckets, and the closing braces are missing
// from the visible source; do not treat the control flow below as complete.
void FillProfileCallback(uptr start, uptr rss, bool file,
                         uptr *mem, uptr stats_size) {
  // presumably `start` has been shifted so these small constants select
  // top-level address ranges — TODO confirm against tsan_platform.h.
  mem[MemShadow] += rss;
  else if (start >= 0x20 && start < 0x30)
    mem[file ? MemFile : MemMmap] += rss;
  else if (start >= 0x30 && start < 0x40)
  else if (start >= 0x7e)
    mem[file ? MemFile : MemMmap] += rss;
  else if (start >= 0x60 && start < 0x62)
  else if (start >= 0x7d && start < 0x7e)
// Formats a one-line memory-usage summary into buf: per-bucket RSS in MB
// (shadow/meta/file/mmap/trace/heap/other) plus a thread count.
// NOTE(review): the trailing internal_snprintf arguments (presumably
// nthread, nlive for the "nthr=%zd/%zd" fields) and the function's closing
// lines are truncated in this chunk.
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
  uptr mem[MemCount] = {};  // zero-init all buckets
  // 7 matches the stats_size FillProfileCallback receives.
  __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
  internal_snprintf(buf, buf_size,
      "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
      " trace:%zd heap:%zd other:%zd nthr=%zd/%zd\n",
      // >> 20 converts byte counts to MB.
      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
      mem[MemHeap] >> 20, mem[MemOther] >> 20,
// Fragment of an RSS reader: parses the second whitespace-separated field of
// /proc/self/statm, which is the resident set size in 4K pages.
// NOTE(review): the enclosing function header (presumably GetRSS()), the
// declarations of `buf`/`pos`/`rss`, fd error checks, loop bodies that
// advance `pos`, and the return are all truncated in this chunk.
uptr fd = OpenFile("/proc/self/statm", false);
uptr len = internal_read(fd, buf, sizeof(buf) - 1);
// The format of the file is:
// 1084 89 69 11 0 79 0
// We need the second number which is RSS in 4K units.
// Skip the first number.
while (*pos >= '0' && *pos <= '9')
// Skip the separator between the first and second numbers.
while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
// Accumulate the decimal digits of the second number into rss.
while (*pos >= '0' && *pos <= '9')
  rss = rss * 10 + *pos++ - '0';
// StopTheWorld callback: releases the entire shadow range back to the OS
// while all other threads are suspended, so no one mutates shadow mid-flush.
// NOTE(review): the callback's second parameter and the function braces are
// truncated in this chunk.
void FlushShadowMemoryCallback(
    const SuspendedThreadsList &suspended_threads_list,
  FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
144 void FlushShadowMemory() {
146 StopTheWorld(FlushShadowMemoryCallback
, 0);
// Protects [beg, end) so any stray application access faults, reporting a
// fatal error if the kernel maps the protection at a different address
// (typically because an unlimited stack already occupies the range).
// NOTE(review): the error path's terminator (presumably Die()) and the
// closing braces are truncated in this chunk.
static void ProtectRange(uptr beg, uptr end) {
  // Mprotect() returns the address it acted on; anything else is failure.
  if (beg != (uptr)Mprotect(beg, end - beg)) {
    Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
    Printf("FATAL: Make sure you are not using unlimited stack\n");
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
// NOTE(review): several lines are truncated in this chunk — the `name`/`fd`
// declarations, early-return bodies, the marker-fill loop body, the error
// branch bodies, cleanup (munmap/close), and closing braces.
static void MapRodata() {
  // First create temp file.
  const char *tmpdir = GetEnv("TMPDIR");
    tmpdir = GetEnv("TEST_TMPDIR");  // NOTE(review): guarding `if` truncated
  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
      tmpdir, (int)internal_getpid());
  // O_EXCL: fail rather than reuse a file someone else created.
  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
  // Fill the file with kShadowRodata.
  const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
  InternalScopedBuffer<u64> marker(kMarkerSize);
  // volatile to prevent insertion of memset
  for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
  internal_write(fd, marker.data(), marker.size());
  // Map the file into memory.
  // NOTE(review): MAP_ANONYMOUS combined with a real fd looks suspicious —
  // confirm intent against upstream before relying on this.
  uptr page = internal_mmap(0, kPageSize, PROT_READ | PROT_WRITE,
      MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
  if (internal_iserror(page)) {
  // Map the file into shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end, offset, prot;
  // Reusing the buffer 'name'.
  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
    // Select readable+executable, non-writable app mappings backed by a
    // real (non-"[...]") file: these contain the text and .rodata.
    if (name[0] != 0 && name[0] != '['
        && (prot & MemoryMappingLayout::kProtectionRead)
        && (prot & MemoryMappingLayout::kProtectionExecute)
        && !(prot & MemoryMappingLayout::kProtectionWrite)
        && IsAppMem(start)) {
      // Assume it's .rodata
      char *shadow_start = (char*)MemToShadow(start);
      char *shadow_end = (char*)MemToShadow(end);
      // Tile the section's shadow with read-only views of the marker file.
      for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
        internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
            PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
// Maps the shadow and meta-shadow regions at their fixed addresses, then
// protects the address ranges that must stay unused by the application.
// NOTE(review): truncated in this chunk — the Die() terminators after the
// FATAL messages, the #endif matching MADV_NOHUGEPAGE, the second argument
// of the outer min(), and closing braces.
void InitializeShadowMemory() {
  // Map memory shadow.
  uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
      kLinuxShadowEnd - kLinuxShadowBeg);
  if (shadow != kLinuxShadowBeg) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and "
           "to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg);
  // This memory range is used for thread stacks and large user mmaps.
  // Frequently a thread uses only a small part of stack and similarly
  // a program uses a small part of large mmap. On some programs
  // we see 20% memory usage reduction without huge pages for this range.
#ifdef MADV_NOHUGEPAGE
  madvise((void*)MemToShadow(0x7f0000000000ULL),
      0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE);
  DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
      kLinuxShadowBeg, kLinuxShadowEnd,
      (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
  // Sanity-check that app memory maps inside the meta-shadow range.
  if (MemToMeta(kLinuxAppMemBeg) < (u32*)kMetaShadow) {
    Printf("ThreadSanitizer: bad meta shadow (%p -> %p < %p)\n",
        kLinuxAppMemBeg, MemToMeta(kLinuxAppMemBeg), kMetaShadow);
  if (MemToMeta(kLinuxAppMemEnd) >= (u32*)(kMetaShadow + kMetaSize)) {
    Printf("ThreadSanitizer: bad meta shadow (%p -> %p >= %p)\n",
        kLinuxAppMemEnd, MemToMeta(kLinuxAppMemEnd), kMetaShadow + kMetaSize);
  uptr meta = (uptr)MmapFixedNoReserve(kMetaShadow, kMetaSize);
  if (meta != kMetaShadow) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and "
           "to link with -pie (%p, %p).\n", meta, kMetaShadow);
  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
      kMetaShadow, kMetaShadow + kMetaSize, kMetaSize >> 30);
  // Ranges the app must never touch directly; fault on access.
  const uptr kClosedLowBeg  = 0x200000;
  const uptr kClosedLowEnd  = kLinuxShadowBeg - 1;
  const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
  const uptr kClosedMidEnd = min(min(kLinuxAppMemBeg, kTraceMemBegin),
  ProtectRange(kClosedLowBeg, kClosedLowEnd);
  ProtectRange(kClosedMidBeg, kClosedMidEnd);
  VPrintf(2, "kClosedLow %zx-%zx (%zuGB)\n",
      kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
  VPrintf(2, "kClosedMid %zx-%zx (%zuGB)\n",
      kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
  VPrintf(2, "app mem: %zx-%zx (%zuGB)\n",
      kLinuxAppMemBeg, kLinuxAppMemEnd,
      (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
  // &shadow is the address of a local, i.e. roughly the current stack top.
  VPrintf(2, "stack: %zx\n", (uptr)&shadow);
// Guessed boundaries of the executable's data/BSS segment, filled in by
// InitDataSeg() from /proc/self/maps; both remain 0 until initialized.
static uptr g_data_start;
static uptr g_data_end;
// Verifies the binary was linked position-independent: the first mapping in
// /proc/self/maps must not fall below the app-memory range TSan expects.
// NOTE(review): the `start`/`end` declarations, the tail of the Next()
// argument list, the Die() terminator, and closing braces are truncated in
// this chunk.
static void CheckPIE() {
  // Ensure that the binary is indeed compiled with -pie.
  MemoryMappingLayout proc_maps(true);
  if (proc_maps.Next(&start, &end,
      /*offset*/0, /*filename*/0, /*filename_size*/0,
    if ((u64)start < kLinuxAppMemBeg) {
      Printf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
             "something is mapped at 0x%zx < 0x%zx)\n",
             start, kLinuxAppMemBeg);
      Printf("FATAL: Make sure to compile with -fPIE"
             " and to link with -pie.\n");
// Guesses the boundaries of the data+BSS segment by scanning
// /proc/self/maps and stores them in g_data_start / g_data_end.
// NOTE(review): truncated in this chunk — the `name` buffer declaration,
// the tail of the Next() argument list, the branch updating g_data_end
// (where `is_bss` is presumably consumed), and closing braces.
static void InitDataSeg() {
  MemoryMappingLayout proc_maps(true);
  uptr start, end, offset;
  bool prev_is_data = false;
  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
    DPrintf("%p-%p %p %s\n", start, end, offset, name);
    // A file-backed mapping at a nonzero offset is treated as data.
    bool is_data = offset != 0 && name[0] != 0;
    // BSS may get merged with [heap] in /proc/self/maps. This is not very
    // reliable, hence the prev_is_data heuristic.
    bool is_bss = offset == 0 &&
        (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data;
    if (g_data_start == 0 && is_data)
      g_data_start = start;
    prev_is_data = is_data;
  DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
  CHECK_LT(g_data_start, g_data_end);
  // Sanity check: this global itself must lie inside the guessed segment.
  CHECK_GE((uptr)&g_data_start, g_data_start);
  CHECK_LT((uptr)&g_data_start, g_data_end);
331 #endif // #ifndef TSAN_GO
// One-time platform setup: disables core dumps when required and adjusts
// resource limits (stack size, address space) that conflict with TSan's
// fixed shadow layout.
// NOTE(review): truncated in this chunk — build-mode preprocessor guards,
// the re-exec calls after adjusting limits, the VReport size argument, and
// closing braces.
void InitializePlatform() {
  DisableCoreDumperIfNecessary();
  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem as well, because the executable
  // is not compiled with -pie.
  // TSan doesn't play well with unlimited stack size (as stack
  // overlaps with shadow memory). If we detect unlimited stack size,
  // we re-exec the program with limited stack size as a best effort.
  if (StackSizeIsUnlimited()) {
    const uptr kMaxStackSize = 32 * 1024 * 1024;  // 32 MB cap
    VReport(1, "Program is run with unlimited stack size, which wouldn't "
               "work with ThreadSanitizer.\n"
               "Re-execing with stack size limited to %zd bytes.\n",
    SetStackSizeLimitInBytes(kMaxStackSize);
  if (!AddressSpaceIsUnlimited()) {
    Report("WARNING: Program is run with limited virtual address space,"
           " which wouldn't work with ThreadSanitizer.\n");
    Report("Re-execing with unlimited virtual address space.\n");
    SetAddressSpaceUnlimited();
372 bool IsGlobalVar(uptr addr
) {
373 return g_data_start
&& addr
>= g_data_start
&& addr
< g_data_end
;
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
// NOTE(review): the declaration of `cnt` (presumably int cnt = 0) and the
// return statement are truncated in this chunk.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
  __res_state *statp = (__res_state*)state;
  // Walk the resolver's extended nameserver slots, up to MAXNS of them,
  // stopping once the caller's fds[] capacity (nfd) is reached.
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    // Only slots with an address and an open socket (-1 means closed).
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds.
// See 'man recvmsg' and 'man 3 cmsg'.
// NOTE(review): the declaration of `res`, the `continue` that presumably
// follows the non-SCM_RIGHTS check, any nfd bound check, and the return
// statement are truncated in this chunk.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  msghdr *msg = (msghdr*)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  // Walk every control message attached to the received message.
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    // Only SCM_RIGHTS messages carry file descriptors.
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
    // Number of fds packed into this control message's payload.
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
// Invokes fn(c, m, abstime) with cleanup(arg) registered as a pthread
// cancellation cleanup handler, so that cancelling the thread inside fn
// still runs the cleanup. Exists because the push/pop macros can only be
// used from a file that includes pthread.h.
// NOTE(review): the declaration of `res` and the return statement are
// truncated in this chunk.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  pthread_cleanup_pop(0);  // 0: do NOT run the handler on normal return
427 } // namespace __tsan
429 #endif // SANITIZER_LINUX