1 //===-- sanitizer_linux_libcdep.cc ----------------------------------------===//
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
6 //===----------------------------------------------------------------------===//
8 // This file is shared between AddressSanitizer and ThreadSanitizer
9 // run-time libraries and implements linux-specific functions from
11 //===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_LINUX

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"

#include <dlfcn.h>
#include <link.h>
#include <pthread.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <unistd.h>
#include <unwind.h>
30 #if !SANITIZER_ANDROID
36 // This function is defined elsewhere if we intercepted pthread_attr_getstack.
37 SANITIZER_WEAK_ATTRIBUTE
38 int __sanitizer_pthread_attr_getstack(void *attr
, void **addr
, size_t *size
) {
39 return pthread_attr_getstack((pthread_attr_t
*)attr
, addr
, size
);
42 namespace __sanitizer
{
44 void GetThreadStackTopAndBottom(bool at_initialization
, uptr
*stack_top
,
46 static const uptr kMaxThreadStackSize
= 1 << 30; // 1Gb
49 if (at_initialization
) {
50 // This is the main thread. Libpthread may not be initialized yet.
52 CHECK_EQ(getrlimit(RLIMIT_STACK
, &rl
), 0);
54 // Find the mapping that contains a stack variable.
55 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
56 uptr start
, end
, offset
;
58 while (proc_maps
.Next(&start
, &end
, &offset
, 0, 0, /* protection */0)) {
63 CHECK((uptr
)&rl
>= start
&& (uptr
)&rl
< end
);
65 // Get stacksize from rlimit, but clip it so that it does not overlap
66 // with other mappings.
67 uptr stacksize
= rl
.rlim_cur
;
68 if (stacksize
> end
- prev_end
)
69 stacksize
= end
- prev_end
;
70 // When running with unlimited stack size, we still want to set some limit.
71 // The unlimited stack size is caused by 'ulimit -s unlimited'.
72 // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
73 if (stacksize
> kMaxThreadStackSize
)
74 stacksize
= kMaxThreadStackSize
;
76 *stack_bottom
= end
- stacksize
;
80 CHECK_EQ(pthread_getattr_np(pthread_self(), &attr
), 0);
83 __sanitizer_pthread_attr_getstack(&attr
, &stackaddr
, (size_t*)&stacksize
);
84 pthread_attr_destroy(&attr
);
86 CHECK_LE(stacksize
, kMaxThreadStackSize
); // Sanity check.
87 *stack_top
= (uptr
)stackaddr
+ stacksize
;
88 *stack_bottom
= (uptr
)stackaddr
;
91 // Does not compile for Go because dlsym() requires -ldl
93 bool SetEnv(const char *name
, const char *value
) {
94 void *f
= dlsym(RTLD_NEXT
, "setenv");
97 typedef int(*setenv_ft
)(const char *name
, const char *value
, int overwrite
);
99 CHECK_EQ(sizeof(setenv_f
), sizeof(f
));
100 internal_memcpy(&setenv_f
, &f
, sizeof(f
));
101 return setenv_f(name
, value
, 1) == 0;
// Sets the current thread's name (visible in /proc and debuggers).
// Returns true on success; false when PR_SET_NAME is unavailable or fails.
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
  return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);  // NOLINT
#else
  return false;
#endif
}
113 bool SanitizerGetThreadName(char *name
, int max_len
) {
116 if (prctl(PR_GET_NAME
, (unsigned long)buff
, 0, 0, 0)) // NOLINT
118 internal_strncpy(name
, buff
, max_len
);
//------------------------- SlowUnwindStack -----------------------------------
// ARM EHABI's personality routine does not understand _URC_NORMAL_STOP, so a
// different stop code is used there. NOTE(review): the #ifdef condition was
// lost in this copy; restored as __arm__ to match Unwind_GetIP below — verify
// against project history.
#ifdef __arm__
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
#else
#define UNWIND_STOP _URC_NORMAL_STOP
#define UNWIND_CONTINUE _URC_NO_REASON
#endif
136 uptr
Unwind_GetIP(struct _Unwind_Context
*ctx
) {
139 _Unwind_VRS_Result res
= _Unwind_VRS_Get(ctx
, _UVRSC_CORE
,
140 15 /* r15 = PC */, _UVRSD_UINT32
, &val
);
141 CHECK(res
== _UVRSR_OK
&& "_Unwind_VRS_Get failed");
142 // Clear the Thumb bit.
143 return val
& ~(uptr
)1;
145 return _Unwind_GetIP(ctx
);
149 struct UnwindTraceArg
{
154 _Unwind_Reason_Code
Unwind_Trace(struct _Unwind_Context
*ctx
, void *param
) {
155 UnwindTraceArg
*arg
= (UnwindTraceArg
*)param
;
156 CHECK_LT(arg
->stack
->size
, arg
->max_depth
);
157 uptr pc
= Unwind_GetIP(ctx
);
158 arg
->stack
->trace
[arg
->stack
->size
++] = pc
;
159 if (arg
->stack
->size
== arg
->max_depth
) return UNWIND_STOP
;
160 return UNWIND_CONTINUE
;
163 void StackTrace::SlowUnwindStack(uptr pc
, uptr max_depth
) {
167 UnwindTraceArg arg
= {this, Min(max_depth
+ 1, kStackTraceMax
)};
168 _Unwind_Backtrace(Unwind_Trace
, &arg
);
169 // We need to pop a few frames so that pc is on top.
170 uptr to_pop
= LocatePcInTrace(pc
);
171 // trace[0] belongs to the current function so we always pop it.
174 PopStackFrames(to_pop
);
178 #endif // !SANITIZER_GO
180 static uptr g_tls_size
;
183 # define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
185 # define DL_INTERNAL_FUNCTION
189 #if !defined(SANITIZER_GO) && !SANITIZER_ANDROID
190 typedef void (*get_tls_func
)(size_t*, size_t*) DL_INTERNAL_FUNCTION
;
191 get_tls_func get_tls
;
192 void *get_tls_static_info_ptr
= dlsym(RTLD_NEXT
, "_dl_get_tls_static_info");
193 CHECK_EQ(sizeof(get_tls
), sizeof(get_tls_static_info_ptr
));
194 internal_memcpy(&get_tls
, &get_tls_static_info_ptr
,
195 sizeof(get_tls_static_info_ptr
));
196 CHECK_NE(get_tls
, 0);
198 size_t tls_align
= 0;
199 get_tls(&tls_size
, &tls_align
);
200 g_tls_size
= tls_size
;
208 #if defined(__x86_64__) || defined(__i386__)
209 // sizeof(struct thread) from glibc.
210 static atomic_uintptr_t kThreadDescriptorSize
;
212 uptr
ThreadDescriptorSize() {
214 uptr val
= atomic_load(&kThreadDescriptorSize
, memory_order_relaxed
);
217 #ifdef _CS_GNU_LIBC_VERSION
218 uptr len
= confstr(_CS_GNU_LIBC_VERSION
, buf
, sizeof(buf
));
219 if (len
< sizeof(buf
) && internal_strncmp(buf
, "glibc 2.", 8) == 0) {
221 int minor
= internal_simple_strtoll(buf
+ 8, &end
, 10);
222 if (end
!= buf
+ 8 && (*end
== '\0' || *end
== '.')) {
223 /* sizeof(struct thread) values from various glibc versions. */
225 val
= FIRST_32_SECOND_64(1104, 1696);
227 val
= FIRST_32_SECOND_64(1120, 1728);
229 val
= FIRST_32_SECOND_64(1136, 1728);
231 val
= FIRST_32_SECOND_64(1136, 1712);
232 else if (minor
== 10)
233 val
= FIRST_32_SECOND_64(1168, 1776);
234 else if (minor
<= 12)
235 val
= FIRST_32_SECOND_64(1168, 2288);
237 val
= FIRST_32_SECOND_64(1216, 2304);
240 atomic_store(&kThreadDescriptorSize
, val
, memory_order_relaxed
);
247 // The offset at which pointer to self is located in the thread descriptor.
248 const uptr kThreadSelfOffset
= FIRST_32_SECOND_64(8, 16);
250 uptr
ThreadSelfOffset() {
251 return kThreadSelfOffset
;
257 asm("mov %%gs:%c1,%0" : "=r"(descr_addr
) : "i"(kThreadSelfOffset
));
259 asm("mov %%fs:%c1,%0" : "=r"(descr_addr
) : "i"(kThreadSelfOffset
));
263 #endif // defined(__x86_64__) || defined(__i386__)
265 void GetThreadStackAndTls(bool main
, uptr
*stk_addr
, uptr
*stk_size
,
266 uptr
*tls_addr
, uptr
*tls_size
) {
268 #if defined(__x86_64__) || defined(__i386__)
269 *tls_addr
= ThreadSelf();
270 *tls_size
= GetTlsSize();
271 *tls_addr
-= *tls_size
;
272 *tls_addr
+= ThreadDescriptorSize();
278 uptr stack_top
, stack_bottom
;
279 GetThreadStackTopAndBottom(main
, &stack_top
, &stack_bottom
);
280 *stk_addr
= stack_bottom
;
281 *stk_size
= stack_top
- stack_bottom
;
284 // If stack and tls intersect, make them non-intersecting.
285 if (*tls_addr
> *stk_addr
&& *tls_addr
< *stk_addr
+ *stk_size
) {
286 CHECK_GT(*tls_addr
+ *tls_size
, *stk_addr
);
287 CHECK_LE(*tls_addr
+ *tls_size
, *stk_addr
+ *stk_size
);
288 *stk_size
-= *tls_size
;
289 *tls_addr
= *stk_addr
+ *stk_size
;
292 #else // SANITIZER_GO
297 #endif // SANITIZER_GO
300 void AdjustStackSizeLinux(void *attr_
) {
301 pthread_attr_t
*attr
= (pthread_attr_t
*)attr_
;
303 size_t stacksize
= 0;
304 __sanitizer_pthread_attr_getstack(attr
, (void**)&stackaddr
, &stacksize
);
305 // GLibC will return (0 - stacksize) as the stack address in the case when
306 // stacksize is set, but stackaddr is not.
307 bool stack_set
= (stackaddr
!= 0) && (stackaddr
+ stacksize
!= 0);
308 // We place a lot of tool data into TLS, account for that.
309 const uptr minstacksize
= GetTlsSize() + 128*1024;
310 if (stacksize
< minstacksize
) {
312 if (common_flags()->verbosity
&& stacksize
!= 0)
313 Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize
,
315 pthread_attr_setstacksize(attr
, minstacksize
);
317 Printf("Sanitizer: pre-allocated stack size is insufficient: "
318 "%zu < %zu\n", stacksize
, minstacksize
);
319 Printf("Sanitizer: pthread_create is likely to fail.\n");
324 #if SANITIZER_ANDROID
325 uptr
GetListOfModules(LoadedModule
*modules
, uptr max_modules
,
326 string_predicate_t filter
) {
329 #else // SANITIZER_ANDROID
330 typedef ElfW(Phdr
) Elf_Phdr
;
332 struct DlIteratePhdrData
{
333 LoadedModule
*modules
;
337 string_predicate_t filter
;
340 static int dl_iterate_phdr_cb(dl_phdr_info
*info
, size_t size
, void *arg
) {
341 DlIteratePhdrData
*data
= (DlIteratePhdrData
*)arg
;
342 if (data
->current_n
== data
->max_n
)
344 InternalScopedBuffer
<char> module_name(kMaxPathLength
);
345 module_name
.data()[0] = '\0';
348 // First module is the binary itself.
349 ReadBinaryName(module_name
.data(), module_name
.size());
350 } else if (info
->dlpi_name
) {
351 internal_strncpy(module_name
.data(), info
->dlpi_name
, module_name
.size());
353 if (module_name
.data()[0] == '\0')
355 if (data
->filter
&& !data
->filter(module_name
.data()))
357 void *mem
= &data
->modules
[data
->current_n
];
358 LoadedModule
*cur_module
= new(mem
) LoadedModule(module_name
.data(),
361 for (int i
= 0; i
< info
->dlpi_phnum
; i
++) {
362 const Elf_Phdr
*phdr
= &info
->dlpi_phdr
[i
];
363 if (phdr
->p_type
== PT_LOAD
) {
364 uptr cur_beg
= info
->dlpi_addr
+ phdr
->p_vaddr
;
365 uptr cur_end
= cur_beg
+ phdr
->p_memsz
;
366 cur_module
->addAddressRange(cur_beg
, cur_end
);
372 uptr
GetListOfModules(LoadedModule
*modules
, uptr max_modules
,
373 string_predicate_t filter
) {
375 DlIteratePhdrData data
= {modules
, 0, true, max_modules
, filter
};
376 dl_iterate_phdr(dl_iterate_phdr_cb
, &data
);
377 return data
.current_n
;
379 #endif // SANITIZER_ANDROID
381 } // namespace __sanitizer
383 #endif // SANITIZER_LINUX