[Sanitizer] extend internal libc with stat/fstat/lstat functions
[blocksruntime.git] / lib / sanitizer_common / sanitizer_linux.cc
blob70f330aa9b3d27ca5c2f9654ddc09f2aa256fea1
1 //===-- sanitizer_linux.cc ------------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is shared between AddressSanitizer and ThreadSanitizer
11 // run-time libraries and implements linux-specific functions from
12 // sanitizer_libc.h.
13 //===----------------------------------------------------------------------===//
14 #ifdef __linux__
16 #include "sanitizer_common.h"
17 #include "sanitizer_internal_defs.h"
18 #include "sanitizer_libc.h"
19 #include "sanitizer_mutex.h"
20 #include "sanitizer_placement_new.h"
21 #include "sanitizer_procmaps.h"
22 #include "sanitizer_stacktrace.h"
24 #include <fcntl.h>
25 #include <pthread.h>
26 #include <sched.h>
27 #include <sys/mman.h>
28 #include <sys/resource.h>
29 #include <sys/stat.h>
30 #include <sys/syscall.h>
31 #include <sys/time.h>
32 #include <sys/types.h>
33 #include <sys/prctl.h>
34 #include <unistd.h>
35 #include <unwind.h>
36 #include <errno.h>
// <linux/futex.h> is broken on some linux distributions.
// Declare only the two operations BlockingMutex needs.
const int FUTEX_WAIT = 0;
const int FUTEX_WAKE = 1;

// Are we using 32-bit or 64-bit syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
#if defined(__x86_64__) || SANITIZER_WORDSIZE == 64
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
#else
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
#endif
51 namespace __sanitizer {
53 // --------------- sanitizer_libc.h
54 void *internal_mmap(void *addr, uptr length, int prot, int flags,
55 int fd, u64 offset) {
56 #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
57 return (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
58 #else
59 return (void *)syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
60 #endif
63 int internal_munmap(void *addr, uptr length) {
64 return syscall(__NR_munmap, addr, length);
67 int internal_close(fd_t fd) {
68 return syscall(__NR_close, fd);
71 fd_t internal_open(const char *filename, int flags) {
72 return syscall(__NR_open, filename, flags);
75 fd_t internal_open(const char *filename, int flags, u32 mode) {
76 return syscall(__NR_open, filename, flags, mode);
79 fd_t OpenFile(const char *filename, bool write) {
80 return internal_open(filename,
81 write ? O_WRONLY | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
84 uptr internal_read(fd_t fd, void *buf, uptr count) {
85 sptr res;
86 HANDLE_EINTR(res, (sptr)syscall(__NR_read, fd, buf, count));
87 return res;
90 uptr internal_write(fd_t fd, const void *buf, uptr count) {
91 sptr res;
92 HANDLE_EINTR(res, (sptr)syscall(__NR_write, fd, buf, count));
93 return res;
96 int internal_stat(const char *path, void *buf) {
97 #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
98 return syscall(__NR_stat, path, buf);
99 #else
100 return syscall(__NR_stat64, path, buf);
101 #endif
104 int internal_lstat(const char *path, void *buf) {
105 #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
106 return syscall(__NR_lstat, path, buf);
107 #else
108 return syscall(__NR_lstat64, path, buf);
109 #endif
112 int internal_fstat(fd_t fd, void *buf) {
113 #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
114 return syscall(__NR_fstat, fd, buf);
115 #else
116 return syscall(__NR_fstat64, fd, buf);
117 #endif
120 uptr internal_filesize(fd_t fd) {
121 #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
122 struct stat st;
123 #else
124 struct stat64 st;
125 #endif
126 if (internal_fstat(fd, &st))
127 return -1;
128 return (uptr)st.st_size;
// dup2(2) via raw syscall.
int internal_dup2(int oldfd, int newfd) {
  const int rc = syscall(__NR_dup2, oldfd, newfd);
  return rc;
}
135 uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
136 return (uptr)syscall(__NR_readlink, path, buf, bufsize);
// sched_yield(2) via raw syscall; always succeeds on Linux.
int internal_sched_yield() {
  const int rc = syscall(__NR_sched_yield);
  return rc;
}
143 // ----------------- sanitizer_common.h
144 bool FileExists(const char *filename) {
145 #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
146 struct stat st;
147 if (syscall(__NR_stat, filename, &st))
148 return false;
149 #else
150 struct stat64 st;
151 if (syscall(__NR_stat64, filename, &st))
152 return false;
153 #endif
154 // Sanity check: filename is a regular file.
155 return S_ISREG(st.st_mode);
158 uptr GetTid() {
159 return syscall(__NR_gettid);
// Computes [stack_bottom, stack_top) for the calling thread.
// at_initialization selects the main-thread path, which must not rely on
// libpthread being initialized yet.
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  static const uptr kMaxThreadStackSize = 256 * (1 << 20);  // 256M
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);

    // Find the mapping that contains a stack variable (rl is on the stack,
    // so its address identifies the stack mapping in /proc/self/maps).
    MemoryMappingLayout proc_maps;
    uptr start, end, offset;
    uptr prev_end = 0;
    while (proc_maps.Next(&start, &end, &offset, 0, 0)) {
      if ((uptr)&rl < end)
        break;
      prev_end = end;
    }
    // Maps are sorted by address, so &rl must fall inside the last mapping
    // examined; otherwise something is badly wrong.
    CHECK((uptr)&rl >= start && (uptr)&rl < end);

    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > end - prev_end)
      stacksize = end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = end;
    *stack_bottom = end - stacksize;
    return;
  }
  // Non-main thread: libpthread already knows the exact stack extent.
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  uptr stacksize = 0;
  void *stackaddr = 0;
  pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
  pthread_attr_destroy(&attr);

  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
  CHECK(stacksize < kMaxThreadStackSize);  // Sanity check.
}
// Like getenv, but reads env directly from /proc and does not use libc.
// This function should be called first inside __asan_init.
// Returns a pointer into a lazily-read, never-freed copy of
// /proc/self/environ, or 0 if the variable is absent or the file could not
// be read. Not thread-safe during the first call (intended for init time).
const char *GetEnv(const char *name) {
  static char *environ;  // deliberately shadows libc's global `environ`
  static uptr len;
  static bool inited;
  if (!inited) {
    inited = true;
    uptr environ_size;
    len = ReadFileToBuffer("/proc/self/environ",
                           &environ, &environ_size, 1 << 26);
  }
  if (!environ || len == 0) return 0;
  uptr namelen = internal_strlen(name);
  const char *p = environ;
  while (*p != '\0') {  // will happen at the \0\0 that terminates the buffer
    // proc file has the format NAME=value\0NAME=value\0NAME=value\0...
    const char* endp =
        (char*)internal_memchr(p, '\0', len - (p - environ));
    if (endp == 0)  // this entry isn't NUL terminated
      return 0;
    else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=')  // Match.
      return p + namelen + 1;  // point after =
    p = endp + 1;
  }
  return 0;  // Not found.
}
// Reads a NUL-separated file (e.g. /proc/self/cmdline) into a freshly
// mmapped, NULL-terminated array of C strings in *arr.
// NOTE(review): the scan assumes the buffer ends in a double NUL
// (buff[i+1] is read after every NUL) and that the file is non-empty —
// confirm ReadFileToBuffer guarantees this for the procfs callers.
static void ReadNullSepFileToArray(const char *path, char ***arr,
                                   int arr_size) {
  char *buff;
  uptr buff_size = 0;
  *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
  ReadFileToBuffer(path, &buff, &buff_size, 1024 * 1024);
  (*arr)[0] = buff;
  int count, i;
  for (count = 1, i = 1; ; i++) {
    if (buff[i] == 0) {
      if (buff[i+1] == 0) break;  // double NUL terminates the file
      (*arr)[count] = &buff[i+1];
      CHECK_LE(count, arr_size - 1);  // FIXME: make this more flexible.
      count++;
    }
  }
  (*arr)[count] = 0;  // NULL-terminate the array itself
}
256 void ReExec() {
257 static const int kMaxArgv = 100, kMaxEnvp = 1000;
258 char **argv, **envp;
259 ReadNullSepFileToArray("/proc/self/cmdline", &argv, kMaxArgv);
260 ReadNullSepFileToArray("/proc/self/environ", &envp, kMaxEnvp);
261 execve(argv[0], argv, envp);
264 void PrepareForSandboxing() {
265 // Some kinds of sandboxes may forbid filesystem access, so we won't be able
266 // to read the file mappings from /proc/self/maps. Luckily, neither the
267 // process will be able to load additional libraries, so it's fine to use the
268 // cached mappings.
269 MemoryMappingLayout::CacheMemoryMappings();
272 // ----------------- sanitizer_procmaps.h
// Process-wide snapshot of /proc/self/maps shared by all
// MemoryMappingLayout instances; guarded by cache_lock_.
// Linker initialized.
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
StaticSpinMutex MemoryMappingLayout::cache_lock_;  // Linker initialized.
// Reads /proc/self/maps into a private buffer; if the read fails (e.g.
// inside a sandbox) falls back to the process-wide cached snapshot.
MemoryMappingLayout::MemoryMappingLayout() {
  proc_self_maps_.len =
      ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
                       &proc_self_maps_.mmaped_size, 1 << 26);
  if (proc_self_maps_.mmaped_size == 0) {
    // Filesystem read failed; the cache must already hold a snapshot.
    LoadFromCache();
    CHECK_GT(proc_self_maps_.len, 0);
  }
  // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
  Reset();
  // FIXME: in the future we may want to cache the mappings on demand only.
  CacheMemoryMappings();
}
291 MemoryMappingLayout::~MemoryMappingLayout() {
292 // Only unmap the buffer if it is different from the cached one. Otherwise
293 // it will be unmapped when the cache is refreshed.
294 if (proc_self_maps_.data != cached_proc_self_maps_.data) {
295 UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
299 void MemoryMappingLayout::Reset() {
300 current_ = proc_self_maps_.data;
// static
// Refreshes the process-wide snapshot of /proc/self/maps under cache_lock_.
// On read failure the previous snapshot is kept; on success the old buffer
// is released once it has been replaced.
void MemoryMappingLayout::CacheMemoryMappings() {
  SpinMutexLock l(&cache_lock_);
  // Don't invalidate the cache if the mappings are unavailable.
  ProcSelfMapsBuff old_proc_self_maps;
  old_proc_self_maps = cached_proc_self_maps_;
  cached_proc_self_maps_.len =
      ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
                       &cached_proc_self_maps_.mmaped_size, 1 << 26);
  if (cached_proc_self_maps_.mmaped_size == 0) {
    // Read failed: roll back to the previous snapshot.
    cached_proc_self_maps_ = old_proc_self_maps;
  } else {
    if (old_proc_self_maps.mmaped_size) {
      UnmapOrDie(old_proc_self_maps.data,
                 old_proc_self_maps.mmaped_size);
    }
  }
}
322 void MemoryMappingLayout::LoadFromCache() {
323 SpinMutexLock l(&cache_lock_);
324 if (cached_proc_self_maps_.data) {
325 proc_self_maps_ = cached_proc_self_maps_;
329 // Parse a hex value in str and update str.
330 static uptr ParseHex(char **str) {
331 uptr x = 0;
332 char *s;
333 for (s = *str; ; s++) {
334 char c = *s;
335 uptr v = 0;
336 if (c >= '0' && c <= '9')
337 v = c - '0';
338 else if (c >= 'a' && c <= 'f')
339 v = c - 'a' + 10;
340 else if (c >= 'A' && c <= 'F')
341 v = c - 'A' + 10;
342 else
343 break;
344 x = x * 16 + v;
346 *str = s;
347 return x;
// True iff c equals either candidate character.
static bool IsOnOf(char c, char c1, char c2) {
  if (c == c1)
    return true;
  return c == c2;
}
// True iff c is an ASCII decimal digit.
static bool IsDecimal(char c) {
  return '0' <= c && c <= '9';
}
// Parses the next line of the buffered /proc/self/maps and advances the
// cursor. Output pointers may be null (a dummy slot is substituted).
// The parser is strictly positional: each CHECK enforces the exact
// single-space field layout the kernel emits.
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
                               char filename[], uptr filename_size) {
  char *last = proc_self_maps_.data + proc_self_maps_.len;
  if (current_ >= last) return false;
  uptr dummy;
  if (!start) start = &dummy;
  if (!end) end = &dummy;
  if (!offset) offset = &dummy;
  char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
  if (next_line == 0)
    next_line = last;  // last line may lack a trailing newline
  // Example: 08048000-08056000 r-xp 00000000 03:0c 64593   /foo/bar
  *start = ParseHex(&current_);
  CHECK_EQ(*current_++, '-');
  *end = ParseHex(&current_);
  CHECK_EQ(*current_++, ' ');
  // Permission flags: each is either '-' or its letter; 4th is s/p.
  CHECK(IsOnOf(*current_++, '-', 'r'));
  CHECK(IsOnOf(*current_++, '-', 'w'));
  CHECK(IsOnOf(*current_++, '-', 'x'));
  CHECK(IsOnOf(*current_++, 's', 'p'));
  CHECK_EQ(*current_++, ' ');
  *offset = ParseHex(&current_);
  CHECK_EQ(*current_++, ' ');
  ParseHex(&current_);  // device major (discarded)
  CHECK_EQ(*current_++, ':');
  ParseHex(&current_);  // device minor (discarded)
  CHECK_EQ(*current_++, ' ');
  while (IsDecimal(*current_))  // inode (discarded)
    current_++;
  CHECK_EQ(*current_++, ' ');
  // Skip spaces.
  while (current_ < next_line && *current_ == ' ')
    current_++;
  // Fill in the filename.
  uptr i = 0;
  while (current_ < next_line) {
    if (filename && i < filename_size - 1)
      filename[i++] = *current_;
    current_++;
  }
  if (filename && i < filename_size)
    filename[i] = 0;
  current_ = next_line + 1;
  return true;
}
404 // Gets the object name and the offset by walking MemoryMappingLayout.
405 bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
406 char filename[],
407 uptr filename_size) {
408 return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
// Sets the current thread's comm name via prctl(PR_SET_NAME).
// Returns true on success; false when the kernel headers lack PR_SET_NAME.
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
  // prctl returns 0 on success.
  return prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0) == 0;  // NOLINT
#else
  return false;
#endif
}
419 bool SanitizerGetThreadName(char *name, int max_len) {
420 #ifdef PR_GET_NAME
421 char buff[17];
422 if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
423 return false;
424 internal_strncpy(name, buff, max_len);
425 name[max_len] = 0;
426 return true;
427 #else
428 return false;
429 #endif
#ifndef SANITIZER_GO
//------------------------- SlowUnwindStack -----------------------------------
// ARM EHABI unwinding has no _URC_NORMAL_STOP; _URC_END_OF_STACK is the
// conventional way to stop the backtrace there.
#ifdef __arm__
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
#else
#define UNWIND_STOP _URC_NORMAL_STOP
#define UNWIND_CONTINUE _URC_NO_REASON
#endif
// Extracts the PC from an unwind context. On ARM, _Unwind_GetIP is not
// part of the EHABI personality interface, so read core register r15
// directly via the VRS accessor instead.
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#ifdef __arm__
  uptr val;
  _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
      15 /* r15 = PC */, _UVRSD_UINT32, &val);
  CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
  // Clear the Thumb bit.
  return val & ~(uptr)1;
#else
  return _Unwind_GetIP(ctx);
#endif
}
455 _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
456 StackTrace *b = (StackTrace*)param;
457 CHECK(b->size < b->max_size);
458 uptr pc = Unwind_GetIP(ctx);
459 b->trace[b->size++] = pc;
460 if (b->size == b->max_size) return UNWIND_STOP;
461 return UNWIND_CONTINUE;
464 static bool MatchPc(uptr cur_pc, uptr trace_pc) {
465 return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
// Unwinds the stack via libgcc/_Unwind_Backtrace and then pops leading
// frames so that the requested pc ends up in trace[0].
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
  this->size = 0;
  this->max_size = max_depth;
  if (max_depth > 1) {
    _Unwind_Backtrace(Unwind_Trace, this);
    // We need to pop a few frames so that pc is on top.
    // trace[0] belongs to the current function so we always pop it.
    // Heuristic: find the first collected frame within 64 bytes of pc and
    // pop everything before it (checking at most 5 frames).
    int to_pop = 1;
    /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
    else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
    else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
    else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
    else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
    this->PopStackFrames(to_pop);
  }
  // Force the exact requested pc into the top slot regardless.
  this->trace[0] = pc;
}
486 #endif // #ifndef SANITIZER_GO
// Futex word states for BlockingMutex. MtxSleeping means at least one
// waiter may be parked in the kernel and a FUTEX_WAKE is required.
enum MutexState {
  MtxUnlocked = 0,
  MtxLocked = 1,
  MtxSleeping = 2
};
// Linker-initialized instances must start zeroed; verify the storage is
// untouched (MtxUnlocked == 0) rather than re-initializing it.
BlockingMutex::BlockingMutex(LinkerInitialized) {
  CHECK_EQ(owner_, 0);
}
// Futex-based lock. Fast path: one atomic exchange Unlocked -> Locked.
// Slow path: set the word to MtxSleeping (so the unlocker knows to issue a
// wake) and park in the kernel until the word reads MtxUnlocked.
void BlockingMutex::Lock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  // Exchanging to MtxSleeping (not MtxLocked) is what forces the eventual
  // Unlock() to call FUTEX_WAKE.
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
    syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
}
// Release the lock; if any thread may be parked (state was MtxSleeping),
// wake exactly one waiter via FUTEX_WAKE.
void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
  CHECK_NE(v, MtxUnlocked);  // unlocking an unlocked mutex is a bug
  if (v == MtxSleeping)
    syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
}
514 } // namespace __sanitizer
516 #endif // __linux__