1 //===-- tsan_platform_linux.cc --------------------------------------------===//
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
6 //===----------------------------------------------------------------------===//
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 // Linux- and FreeBSD-specific code.
11 //===----------------------------------------------------------------------===//
14 #include "sanitizer_common/sanitizer_platform.h"
15 #if SANITIZER_LINUX || SANITIZER_FREEBSD
17 #include "sanitizer_common/sanitizer_common.h"
18 #include "sanitizer_common/sanitizer_libc.h"
19 #include "sanitizer_common/sanitizer_posix.h"
20 #include "sanitizer_common/sanitizer_procmaps.h"
21 #include "sanitizer_common/sanitizer_stoptheworld.h"
22 #include "sanitizer_common/sanitizer_stackdepot.h"
23 #include "tsan_platform.h"
25 #include "tsan_flags.h"
35 #include <sys/syscall.h>
36 #include <sys/socket.h>
38 #include <sys/types.h>
39 #include <sys/resource.h>
46 #define __need_res_state
59 extern "C" void *__libc_stack_end
;
60 void *__libc_stack_end
= 0;
65 static uptr g_data_start
;
66 static uptr g_data_end
;
80 void FillProfileCallback(uptr p
, uptr rss
, bool file
,
81 uptr
*mem
, uptr stats_size
) {
83 if (p
>= kShadowBeg
&& p
< kShadowEnd
)
84 mem
[MemShadow
] += rss
;
85 else if (p
>= kMetaShadowBeg
&& p
< kMetaShadowEnd
)
88 else if (p
>= kHeapMemBeg
&& p
< kHeapMemEnd
)
90 else if (p
>= kLoAppMemBeg
&& p
< kLoAppMemEnd
)
91 mem
[file
? MemFile
: MemMmap
] += rss
;
92 else if (p
>= kHiAppMemBeg
&& p
< kHiAppMemEnd
)
93 mem
[file
? MemFile
: MemMmap
] += rss
;
95 else if (p
>= kAppMemBeg
&& p
< kAppMemEnd
)
96 mem
[file
? MemFile
: MemMmap
] += rss
;
98 else if (p
>= kTraceMemBeg
&& p
< kTraceMemEnd
)
101 mem
[MemOther
] += rss
;
104 void WriteMemoryProfile(char *buf
, uptr buf_size
, uptr nthread
, uptr nlive
) {
105 uptr mem
[MemCount
] = {};
106 __sanitizer::GetMemoryProfile(FillProfileCallback
, mem
, 7);
107 StackDepotStats
*stacks
= StackDepotGetStats();
108 internal_snprintf(buf
, buf_size
,
109 "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
110 " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
111 mem
[MemTotal
] >> 20, mem
[MemShadow
] >> 20, mem
[MemMeta
] >> 20,
112 mem
[MemFile
] >> 20, mem
[MemMmap
] >> 20, mem
[MemTrace
] >> 20,
113 mem
[MemHeap
] >> 20, mem
[MemOther
] >> 20,
114 stacks
->allocated
>> 20, stacks
->n_uniq_ids
,
119 void FlushShadowMemoryCallback(
120 const SuspendedThreadsList
&suspended_threads_list
,
122 FlushUnneededShadowMemory(kShadowBeg
, kShadowEnd
- kShadowBeg
);
126 void FlushShadowMemory() {
128 StopTheWorld(FlushShadowMemoryCallback
, 0);
133 // Mark shadow for .rodata sections with the special kShadowRodata marker.
134 // Accesses to .rodata can't race, so this saves time, memory and trace space.
135 static void MapRodata() {
136 // First create temp file.
137 const char *tmpdir
= GetEnv("TMPDIR");
139 tmpdir
= GetEnv("TEST_TMPDIR");
147 internal_snprintf(name
, sizeof(name
), "%s/tsan.rodata.%d",
148 tmpdir
, (int)internal_getpid());
149 uptr openrv
= internal_open(name
, O_RDWR
| O_CREAT
| O_EXCL
, 0600);
150 if (internal_iserror(openrv
))
152 internal_unlink(name
); // Unlink it now, so that we can reuse the buffer.
154 // Fill the file with kShadowRodata.
155 const uptr kMarkerSize
= 512 * 1024 / sizeof(u64
);
156 InternalScopedBuffer
<u64
> marker(kMarkerSize
);
157 // volatile to prevent insertion of memset
158 for (volatile u64
*p
= marker
.data(); p
< marker
.data() + kMarkerSize
; p
++)
160 internal_write(fd
, marker
.data(), marker
.size());
161 // Map the file into memory.
162 uptr page
= internal_mmap(0, GetPageSizeCached(), PROT_READ
| PROT_WRITE
,
163 MAP_PRIVATE
| MAP_ANONYMOUS
, fd
, 0);
164 if (internal_iserror(page
)) {
168 // Map the file into shadow of .rodata sections.
169 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
170 uptr start
, end
, offset
, prot
;
171 // Reusing the buffer 'name'.
172 while (proc_maps
.Next(&start
, &end
, &offset
, name
, ARRAY_SIZE(name
), &prot
)) {
173 if (name
[0] != 0 && name
[0] != '['
174 && (prot
& MemoryMappingLayout::kProtectionRead
)
175 && (prot
& MemoryMappingLayout::kProtectionExecute
)
176 && !(prot
& MemoryMappingLayout::kProtectionWrite
)
177 && IsAppMem(start
)) {
178 // Assume it's .rodata
179 char *shadow_start
= (char*)MemToShadow(start
);
180 char *shadow_end
= (char*)MemToShadow(end
);
181 for (char *p
= shadow_start
; p
< shadow_end
; p
+= marker
.size()) {
182 internal_mmap(p
, Min
<uptr
>(marker
.size(), shadow_end
- p
),
183 PROT_READ
, MAP_PRIVATE
| MAP_FIXED
, fd
, 0);
190 void InitializeShadowMemoryPlatform() {
194 static void InitDataSeg() {
195 MemoryMappingLayout
proc_maps(true);
196 uptr start
, end
, offset
;
198 #if SANITIZER_FREEBSD
199 // On FreeBSD BSS is usually the last block allocated within the
200 // low range and heap is the last block allocated within the range
201 // 0x800000000-0x8ffffffff.
202 while (proc_maps
.Next(&start
, &end
, &offset
, name
, ARRAY_SIZE(name
),
204 DPrintf("%p-%p %p %s\n", start
, end
, offset
, name
);
205 if ((start
& 0xffff00000000ULL
) == 0 && (end
& 0xffff00000000ULL
) == 0 &&
207 g_data_start
= start
;
212 bool prev_is_data
= false;
213 while (proc_maps
.Next(&start
, &end
, &offset
, name
, ARRAY_SIZE(name
),
215 DPrintf("%p-%p %p %s\n", start
, end
, offset
, name
);
216 bool is_data
= offset
!= 0 && name
[0] != 0;
217 // BSS may get merged with [heap] in /proc/self/maps. This is not very
219 bool is_bss
= offset
== 0 &&
220 (name
[0] == 0 || internal_strcmp(name
, "[heap]") == 0) && prev_is_data
;
221 if (g_data_start
== 0 && is_data
)
222 g_data_start
= start
;
225 prev_is_data
= is_data
;
228 DPrintf("guessed data_start=%p data_end=%p\n", g_data_start
, g_data_end
);
229 CHECK_LT(g_data_start
, g_data_end
);
230 CHECK_GE((uptr
)&g_data_start
, g_data_start
);
231 CHECK_LT((uptr
)&g_data_start
, g_data_end
);
234 #endif // #ifndef SANITIZER_GO
236 void InitializePlatform() {
237 DisableCoreDumperIfNecessary();
239 // Go maps shadow memory lazily and works fine with limited address space.
240 // Unlimited stack is not a problem as well, because the executable
241 // is not compiled with -pie.
244 // TSan doesn't play well with unlimited stack size (as stack
245 // overlaps with shadow memory). If we detect unlimited stack size,
246 // we re-exec the program with limited stack size as a best effort.
247 if (StackSizeIsUnlimited()) {
248 const uptr kMaxStackSize
= 32 * 1024 * 1024;
249 VReport(1, "Program is run with unlimited stack size, which wouldn't "
250 "work with ThreadSanitizer.\n"
251 "Re-execing with stack size limited to %zd bytes.\n",
253 SetStackSizeLimitInBytes(kMaxStackSize
);
257 if (!AddressSpaceIsUnlimited()) {
258 Report("WARNING: Program is run with limited virtual address space,"
259 " which wouldn't work with ThreadSanitizer.\n");
260 Report("Re-execing with unlimited virtual address space.\n");
261 SetAddressSpaceUnlimited();
275 bool IsGlobalVar(uptr addr
) {
276 return g_data_start
&& addr
>= g_data_start
&& addr
< g_data_end
;
280 // Extract file descriptors passed to glibc internal __res_iclose function.
281 // This is required to properly "close" the fds, because we do not see internal
282 // closes within glibc. The code is a pure hack.
283 int ExtractResolvFDs(void *state
, int *fds
, int nfd
) {
286 __res_state
*statp
= (__res_state
*)state
;
287 for (int i
= 0; i
< MAXNS
&& cnt
< nfd
; i
++) {
288 if (statp
->_u
._ext
.nsaddrs
[i
] && statp
->_u
._ext
.nssocks
[i
] != -1)
289 fds
[cnt
++] = statp
->_u
._ext
.nssocks
[i
];
// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds.
// see 'man recvmsg' and 'man 3 cmsg'.
// Walks the control messages of *msgp, copies fds carried by SCM_RIGHTS
// messages into fds[] (at most nfd of them) and returns how many were copied.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int res = 0;  // NOTE(review): declaration dropped in the mangled source
  msghdr *msg = (msghdr*)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    // Only SCM_RIGHTS control messages carry descriptors.
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
      if (res == nfd)  // do not overflow the caller's buffer
        return res;
    }
  }
  return res;
}
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
// Invokes fn(c, m, abstime) with cleanup(arg) registered as a pthread
// cancellation cleanup handler, so cancellation inside fn still releases the
// caller's resources. Returns fn's result.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;  // NOTE(review): declaration dropped in the mangled source
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  // Pop without executing the handler: fn returned normally.
  pthread_cleanup_pop(0);
  return res;
}
// No-op on Linux/FreeBSD: nothing to replace here; this hook exists only to
// satisfy the cross-platform TSan interface.
void ReplaceSystemMalloc() {
}
336 } // namespace __tsan
338 #endif // SANITIZER_LINUX || SANITIZER_FREEBSD