1 //===-- tsan_platform_linux.cc --------------------------------------------===//
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
6 //===----------------------------------------------------------------------===//
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 // Linux- and FreeBSD-specific code.
11 //===----------------------------------------------------------------------===//
14 #include "sanitizer_common/sanitizer_platform.h"
15 #if SANITIZER_LINUX || SANITIZER_FREEBSD
17 #include "sanitizer_common/sanitizer_common.h"
18 #include "sanitizer_common/sanitizer_libc.h"
19 #include "sanitizer_common/sanitizer_procmaps.h"
20 #include "sanitizer_common/sanitizer_stoptheworld.h"
21 #include "sanitizer_common/sanitizer_stackdepot.h"
22 #include "tsan_platform.h"
24 #include "tsan_flags.h"
34 #include <sys/syscall.h>
35 #include <sys/socket.h>
37 #include <sys/types.h>
38 #include <sys/resource.h>
45 #define __need_res_state
// Provide a definition of glibc's internal __libc_stack_end so the symbol is
// always available to the runtime; presumably glibc's own definition takes
// precedence when the program links against it.
// NOTE(review): upstream declares this with a weak attribute — confirm the
// attribute was not dropped in this copy.
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
// Boundaries of the main executable's data segment (data + bss), guessed by
// InitDataSeg() below and queried by IsGlobalVar(). Zero until initialized.
static uptr g_data_start;
static uptr g_data_end;

// Page size assumed by MapRodata() when mmap'ing the marker page.
const uptr kPageSize = 4096;
81 void FillProfileCallback(uptr p
, uptr rss
, bool file
,
82 uptr
*mem
, uptr stats_size
) {
84 if (p
>= kShadowBeg
&& p
< kShadowEnd
)
85 mem
[MemShadow
] += rss
;
86 else if (p
>= kMetaShadowBeg
&& p
< kMetaShadowEnd
)
89 else if (p
>= kHeapMemBeg
&& p
< kHeapMemEnd
)
91 else if (p
>= kLoAppMemBeg
&& p
< kLoAppMemEnd
)
92 mem
[file
? MemFile
: MemMmap
] += rss
;
93 else if (p
>= kHiAppMemBeg
&& p
< kHiAppMemEnd
)
94 mem
[file
? MemFile
: MemMmap
] += rss
;
96 else if (p
>= kAppMemBeg
&& p
< kAppMemEnd
)
97 mem
[file
? MemFile
: MemMmap
] += rss
;
99 else if (p
>= kTraceMemBeg
&& p
< kTraceMemEnd
)
100 mem
[MemTrace
] += rss
;
102 mem
[MemOther
] += rss
;
105 void WriteMemoryProfile(char *buf
, uptr buf_size
, uptr nthread
, uptr nlive
) {
106 uptr mem
[MemCount
] = {};
107 __sanitizer::GetMemoryProfile(FillProfileCallback
, mem
, 7);
108 StackDepotStats
*stacks
= StackDepotGetStats();
109 internal_snprintf(buf
, buf_size
,
110 "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
111 " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
112 mem
[MemTotal
] >> 20, mem
[MemShadow
] >> 20, mem
[MemMeta
] >> 20,
113 mem
[MemFile
] >> 20, mem
[MemMmap
] >> 20, mem
[MemTrace
] >> 20,
114 mem
[MemHeap
] >> 20, mem
[MemOther
] >> 20,
115 stacks
->allocated
>> 20, stacks
->n_uniq_ids
,
120 uptr fd
= OpenFile("/proc/self/statm", false);
124 uptr len
= internal_read(fd
, buf
, sizeof(buf
) - 1);
129 // The format of the file is:
130 // 1084 89 69 11 0 79 0
131 // We need the second number which is RSS in 4K units.
133 // Skip the first number.
134 while (*pos
>= '0' && *pos
<= '9')
137 while (!(*pos
>= '0' && *pos
<= '9') && *pos
!= 0)
141 while (*pos
>= '0' && *pos
<= '9')
142 rss
= rss
* 10 + *pos
++ - '0';
147 void FlushShadowMemoryCallback(
148 const SuspendedThreadsList
&suspended_threads_list
,
150 FlushUnneededShadowMemory(kShadowBeg
, kShadowEnd
- kShadowBeg
);
154 void FlushShadowMemory() {
156 StopTheWorld(FlushShadowMemoryCallback
, 0);
161 static void ProtectRange(uptr beg
, uptr end
) {
165 if (beg
!= (uptr
)Mprotect(beg
, end
- beg
)) {
166 Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg
, end
);
167 Printf("FATAL: Make sure you are not using unlimited stack\n");
172 // Mark shadow for .rodata sections with the special kShadowRodata marker.
173 // Accesses to .rodata can't race, so this saves time, memory and trace space.
174 static void MapRodata() {
175 // First create temp file.
176 const char *tmpdir
= GetEnv("TMPDIR");
178 tmpdir
= GetEnv("TEST_TMPDIR");
186 internal_snprintf(name
, sizeof(name
), "%s/tsan.rodata.%d",
187 tmpdir
, (int)internal_getpid());
188 uptr openrv
= internal_open(name
, O_RDWR
| O_CREAT
| O_EXCL
, 0600);
189 if (internal_iserror(openrv
))
191 internal_unlink(name
); // Unlink it now, so that we can reuse the buffer.
193 // Fill the file with kShadowRodata.
194 const uptr kMarkerSize
= 512 * 1024 / sizeof(u64
);
195 InternalScopedBuffer
<u64
> marker(kMarkerSize
);
196 // volatile to prevent insertion of memset
197 for (volatile u64
*p
= marker
.data(); p
< marker
.data() + kMarkerSize
; p
++)
199 internal_write(fd
, marker
.data(), marker
.size());
200 // Map the file into memory.
201 uptr page
= internal_mmap(0, kPageSize
, PROT_READ
| PROT_WRITE
,
202 MAP_PRIVATE
| MAP_ANONYMOUS
, fd
, 0);
203 if (internal_iserror(page
)) {
207 // Map the file into shadow of .rodata sections.
208 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
209 uptr start
, end
, offset
, prot
;
210 // Reusing the buffer 'name'.
211 while (proc_maps
.Next(&start
, &end
, &offset
, name
, ARRAY_SIZE(name
), &prot
)) {
212 if (name
[0] != 0 && name
[0] != '['
213 && (prot
& MemoryMappingLayout::kProtectionRead
)
214 && (prot
& MemoryMappingLayout::kProtectionExecute
)
215 && !(prot
& MemoryMappingLayout::kProtectionWrite
)
216 && IsAppMem(start
)) {
217 // Assume it's .rodata
218 char *shadow_start
= (char*)MemToShadow(start
);
219 char *shadow_end
= (char*)MemToShadow(end
);
220 for (char *p
= shadow_start
; p
< shadow_end
; p
+= marker
.size()) {
221 internal_mmap(p
, Min
<uptr
>(marker
.size(), shadow_end
- p
),
222 PROT_READ
, MAP_PRIVATE
| MAP_FIXED
, fd
, 0);
229 void InitializeShadowMemory() {
230 // Map memory shadow.
231 uptr shadow
= (uptr
)MmapFixedNoReserve(kShadowBeg
,
232 kShadowEnd
- kShadowBeg
);
233 if (shadow
!= kShadowBeg
) {
234 Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
235 Printf("FATAL: Make sure to compile with -fPIE and "
236 "to link with -pie (%p, %p).\n", shadow
, kShadowBeg
);
239 // This memory range is used for thread stacks and large user mmaps.
240 // Frequently a thread uses only a small part of stack and similarly
241 // a program uses a small part of large mmap. On some programs
242 // we see 20% memory usage reduction without huge pages for this range.
243 #ifdef MADV_NOHUGEPAGE
244 madvise((void*)MemToShadow(0x7f0000000000ULL
),
245 0x10000000000ULL
* kShadowMultiplier
, MADV_NOHUGEPAGE
);
247 DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
248 kShadowBeg
, kShadowEnd
,
249 (kShadowEnd
- kShadowBeg
) >> 30);
252 uptr meta_size
= kMetaShadowEnd
- kMetaShadowBeg
;
253 uptr meta
= (uptr
)MmapFixedNoReserve(kMetaShadowBeg
, meta_size
);
254 if (meta
!= kMetaShadowBeg
) {
255 Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
256 Printf("FATAL: Make sure to compile with -fPIE and "
257 "to link with -pie (%p, %p).\n", meta
, kMetaShadowBeg
);
260 DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
261 meta
, meta
+ meta_size
, meta_size
>> 30);
266 static void InitDataSeg() {
267 MemoryMappingLayout
proc_maps(true);
268 uptr start
, end
, offset
;
270 #if SANITIZER_FREEBSD
271 // On FreeBSD BSS is usually the last block allocated within the
272 // low range and heap is the last block allocated within the range
273 // 0x800000000-0x8ffffffff.
274 while (proc_maps
.Next(&start
, &end
, &offset
, name
, ARRAY_SIZE(name
),
276 DPrintf("%p-%p %p %s\n", start
, end
, offset
, name
);
277 if ((start
& 0xffff00000000ULL
) == 0 && (end
& 0xffff00000000ULL
) == 0 &&
279 g_data_start
= start
;
284 bool prev_is_data
= false;
285 while (proc_maps
.Next(&start
, &end
, &offset
, name
, ARRAY_SIZE(name
),
287 DPrintf("%p-%p %p %s\n", start
, end
, offset
, name
);
288 bool is_data
= offset
!= 0 && name
[0] != 0;
289 // BSS may get merged with [heap] in /proc/self/maps. This is not very
291 bool is_bss
= offset
== 0 &&
292 (name
[0] == 0 || internal_strcmp(name
, "[heap]") == 0) && prev_is_data
;
293 if (g_data_start
== 0 && is_data
)
294 g_data_start
= start
;
297 prev_is_data
= is_data
;
300 DPrintf("guessed data_start=%p data_end=%p\n", g_data_start
, g_data_end
);
301 CHECK_LT(g_data_start
, g_data_end
);
302 CHECK_GE((uptr
)&g_data_start
, g_data_start
);
303 CHECK_LT((uptr
)&g_data_start
, g_data_end
);
306 static void CheckAndProtect() {
307 // Ensure that the binary is indeed compiled with -pie.
308 MemoryMappingLayout
proc_maps(true);
310 while (proc_maps
.Next(&p
, &end
, 0, 0, 0, 0)) {
313 if (p
>= kHeapMemEnd
&&
314 p
< kHeapMemEnd
+ PrimaryAllocator::AdditionalSize())
316 if (p
>= 0xf000000000000000ull
) // vdso
318 Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p
, end
);
322 ProtectRange(kLoAppMemEnd
, kShadowBeg
);
323 ProtectRange(kShadowEnd
, kMetaShadowBeg
);
324 ProtectRange(kMetaShadowEnd
, kTraceMemBeg
);
325 ProtectRange(kTraceMemEnd
, kHeapMemBeg
);
326 ProtectRange(kHeapMemEnd
+ PrimaryAllocator::AdditionalSize(), kHiAppMemBeg
);
328 #endif // #ifndef TSAN_GO
330 void InitializePlatform() {
331 DisableCoreDumperIfNecessary();
333 // Go maps shadow memory lazily and works fine with limited address space.
334 // Unlimited stack is not a problem as well, because the executable
335 // is not compiled with -pie.
338 // TSan doesn't play well with unlimited stack size (as stack
339 // overlaps with shadow memory). If we detect unlimited stack size,
340 // we re-exec the program with limited stack size as a best effort.
341 if (StackSizeIsUnlimited()) {
342 const uptr kMaxStackSize
= 32 * 1024 * 1024;
343 VReport(1, "Program is run with unlimited stack size, which wouldn't "
344 "work with ThreadSanitizer.\n"
345 "Re-execing with stack size limited to %zd bytes.\n",
347 SetStackSizeLimitInBytes(kMaxStackSize
);
351 if (!AddressSpaceIsUnlimited()) {
352 Report("WARNING: Program is run with limited virtual address space,"
353 " which wouldn't work with ThreadSanitizer.\n");
354 Report("Re-execing with unlimited virtual address space.\n");
355 SetAddressSpaceUnlimited();
369 bool IsGlobalVar(uptr addr
) {
370 return g_data_start
&& addr
>= g_data_start
&& addr
< g_data_end
;
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
// Writes up to nfd descriptors into fds and returns the number written.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX
  int cnt = 0;
  __res_state *statp = (__res_state*)state;
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  // The glibc __res_state internals are not available here.
  return 0;
#endif
}
// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds.
// see 'man recvmsg' and 'man 3 cmsg'.
// Walks the SCM_RIGHTS control messages of msgp, writes at most nfd
// descriptors into fds and returns the number written.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int res = 0;
  struct msghdr *msg = (struct msghdr *)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
      if (res == nfd)  // Caller's buffer is full: stop early.
        return res;
    }
  }
  return res;
}
// Calls fn(c, m, abstime) with `cleanup(arg)` registered as a pthread
// cancellation cleanup handler for the duration of the call, and returns
// fn's result. The c/m/abstime naming suggests a pthread_cond_timedwait-like
// callee — the caller supplies the actual semantics.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  pthread_cleanup_pop(0);
  return res;
}
424 } // namespace __tsan
426 #endif // SANITIZER_LINUX || SANITIZER_FREEBSD