//===-- sanitizer_posix.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"

#if SANITIZER_POSIX

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"

#include <sys/mman.h>
#include <sys/utsname.h>

#if SANITIZER_LINUX && !SANITIZER_ANDROID
#include <sys/personality.h>
#endif
31 namespace __sanitizer
{
// ------------- sanitizer_common.h
34 uptr
GetMmapGranularity() {
#if SANITIZER_WORDSIZE == 32
// Take care of unusable kernel area in top gigabyte.
// Returns the number of bytes at the top of a 32-bit address space that are
// reserved for the kernel (and therefore unusable by user code), or 0 when
// the full 4Gb appears to be available to the process.
static uptr GetKernelAreaSize() {
#if SANITIZER_LINUX
  const uptr gbyte = 1UL << 30;

  // Firstly check if there are writable segments
  // mapped to top gigabyte (e.g. stack).
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr end, prot;
  while (proc_maps.Next(/*start*/0, &end,
                        /*offset*/0, /*filename*/0,
                        /*filename_size*/0, &prot)) {
    // A writable mapping at or above 3Gb means user space extends there,
    // so nothing is reserved.
    if ((end >= 3 * gbyte)
        && (prot & MemoryMappingLayout::kProtectionWrite) != 0)
      return 0;
  }

#if !SANITIZER_ANDROID
  // Even if nothing is mapped, top Gb may still be accessible
  // if we are running on 64-bit kernel.
  // Uname may report misleading results if personality type
  // is modified (e.g. under schroot) so check this as well.
  struct utsname uname_info;
  int pers = personality(/*persona*/ 0xffffffffUL);  // query-only call
  if (!(pers & PER_MASK)
      && uname(&uname_info) == 0
      && internal_strstr(uname_info.machine, "64"))
    return 0;
#endif  // SANITIZER_ANDROID

  // Top gigabyte is reserved for kernel.
  return gbyte;
#else
  return 0;
#endif  // SANITIZER_LINUX
}
#endif  // SANITIZER_WORDSIZE == 32
77 uptr
GetMaxVirtualAddress() {
78 #if SANITIZER_WORDSIZE == 64
79 # if defined(__powerpc64__)
80 // On PowerPC64 we have two different address space layouts: 44- and 46-bit.
81 // We somehow need to figure out which one we are using now and choose
82 // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
83 // Note that with 'ulimit -s unlimited' the stack is moved away from the top
84 // of the address space, so simply checking the stack address is not enough.
85 return (1ULL << 44) - 1; // 0x00000fffffffffffUL
86 # elif defined(__aarch64__)
87 return (1ULL << 39) - 1;
89 return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
91 #else // SANITIZER_WORDSIZE == 32
92 uptr res
= (1ULL << 32) - 1; // 0xffffffff;
93 if (!common_flags()->full_address_space
)
94 res
-= GetKernelAreaSize();
95 CHECK_LT(reinterpret_cast<uptr
>(&res
), res
);
97 #endif // SANITIZER_WORDSIZE
100 void *MmapOrDie(uptr size
, const char *mem_type
) {
101 size
= RoundUpTo(size
, GetPageSizeCached());
102 uptr res
= internal_mmap(0, size
,
103 PROT_READ
| PROT_WRITE
,
104 MAP_PRIVATE
| MAP_ANON
, -1, 0);
106 if (internal_iserror(res
, &reserrno
)) {
107 static int recursion_count
;
108 if (recursion_count
) {
109 // The Report() and CHECK calls below may call mmap recursively and fail.
110 // If we went into recursion, just die.
111 RawWrite("ERROR: Failed to mmap\n");
115 Report("ERROR: %s failed to "
116 "allocate 0x%zx (%zd) bytes of %s (errno: %d)\n",
117 SanitizerToolName
, size
, size
, mem_type
, reserrno
);
119 CHECK("unable to mmap" && 0);
121 IncreaseTotalMmap(size
);
125 void UnmapOrDie(void *addr
, uptr size
) {
126 if (!addr
|| !size
) return;
127 uptr res
= internal_munmap(addr
, size
);
128 if (internal_iserror(res
)) {
129 Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
130 SanitizerToolName
, size
, size
, addr
);
131 CHECK("unable to unmap" && 0);
133 DecreaseTotalMmap(size
);
136 void *MmapNoReserveOrDie(uptr size
, const char *mem_type
) {
137 uptr PageSize
= GetPageSizeCached();
138 uptr p
= internal_mmap(0,
139 RoundUpTo(size
, PageSize
),
140 PROT_READ
| PROT_WRITE
,
141 MAP_PRIVATE
| MAP_ANON
| MAP_NORESERVE
,
144 if (internal_iserror(p
, &reserrno
)) {
145 Report("ERROR: %s failed to "
146 "allocate noreserve 0x%zx (%zd) bytes for '%s' (errno: %d)\n",
147 SanitizerToolName
, size
, size
, mem_type
, reserrno
);
148 CHECK("unable to mmap" && 0);
150 IncreaseTotalMmap(size
);
154 void *MmapFixedNoReserve(uptr fixed_addr
, uptr size
) {
155 uptr PageSize
= GetPageSizeCached();
156 uptr p
= internal_mmap((void*)(fixed_addr
& ~(PageSize
- 1)),
157 RoundUpTo(size
, PageSize
),
158 PROT_READ
| PROT_WRITE
,
159 MAP_PRIVATE
| MAP_ANON
| MAP_FIXED
| MAP_NORESERVE
,
162 if (internal_iserror(p
, &reserrno
))
163 Report("ERROR: %s failed to "
164 "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
165 SanitizerToolName
, size
, size
, fixed_addr
, reserrno
);
166 IncreaseTotalMmap(size
);
170 void *MmapFixedOrDie(uptr fixed_addr
, uptr size
) {
171 uptr PageSize
= GetPageSizeCached();
172 uptr p
= internal_mmap((void*)(fixed_addr
& ~(PageSize
- 1)),
173 RoundUpTo(size
, PageSize
),
174 PROT_READ
| PROT_WRITE
,
175 MAP_PRIVATE
| MAP_ANON
| MAP_FIXED
,
178 if (internal_iserror(p
, &reserrno
)) {
179 Report("ERROR: %s failed to "
180 "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
181 SanitizerToolName
, size
, size
, fixed_addr
, reserrno
);
182 CHECK("unable to mmap" && 0);
184 IncreaseTotalMmap(size
);
188 void *Mprotect(uptr fixed_addr
, uptr size
) {
189 return (void *)internal_mmap((void*)fixed_addr
, size
,
191 MAP_PRIVATE
| MAP_ANON
| MAP_FIXED
|
192 MAP_NORESERVE
, -1, 0);
195 void *MapFileToMemory(const char *file_name
, uptr
*buff_size
) {
196 uptr openrv
= OpenFile(file_name
, false);
197 CHECK(!internal_iserror(openrv
));
199 uptr fsize
= internal_filesize(fd
);
200 CHECK_NE(fsize
, (uptr
)-1);
202 *buff_size
= RoundUpTo(fsize
, GetPageSizeCached());
203 uptr map
= internal_mmap(0, *buff_size
, PROT_READ
, MAP_PRIVATE
, fd
, 0);
204 return internal_iserror(map
) ? 0 : (void *)map
;
207 void *MapWritableFileToMemory(void *addr
, uptr size
, uptr fd
, uptr offset
) {
208 uptr flags
= MAP_SHARED
;
209 if (addr
) flags
|= MAP_FIXED
;
210 uptr p
= internal_mmap(addr
, size
, PROT_READ
| PROT_WRITE
, flags
, fd
, offset
);
211 if (internal_iserror(p
)) {
212 Printf("could not map writable file (%zd, %zu, %zu): %zd\n", fd
, offset
,
219 static inline bool IntervalsAreSeparate(uptr start1
, uptr end1
,
220 uptr start2
, uptr end2
) {
221 CHECK(start1
<= end1
);
222 CHECK(start2
<= end2
);
223 return (end1
< start2
) || (end2
< start1
);
226 // FIXME: this is thread-unsafe, but should not cause problems most of the time.
227 // When the shadow is mapped only a single thread usually exists (plus maybe
228 // several worker threads on Mac, which aren't expected to map big chunks of
230 bool MemoryRangeIsAvailable(uptr range_start
, uptr range_end
) {
231 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
233 while (proc_maps
.Next(&start
, &end
,
234 /*offset*/0, /*filename*/0, /*filename_size*/0,
236 if (!IntervalsAreSeparate(start
, end
, range_start
, range_end
))
242 void DumpProcessMap() {
243 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
245 const sptr kBufSize
= 4095;
246 char *filename
= (char*)MmapOrDie(kBufSize
, __func__
);
247 Report("Process memory map follows:\n");
248 while (proc_maps
.Next(&start
, &end
, /* file_offset */0,
249 filename
, kBufSize
, /* protection */0)) {
250 Printf("\t%p-%p\t%s\n", (void*)start
, (void*)end
, filename
);
252 Report("End of process memory map.\n");
253 UnmapOrDie(filename
, kBufSize
);
256 const char *GetPwd() {
257 return GetEnv("PWD");
260 char *FindPathToBinary(const char *name
) {
261 const char *path
= GetEnv("PATH");
264 uptr name_len
= internal_strlen(name
);
265 InternalScopedBuffer
<char> buffer(kMaxPathLength
);
266 const char *beg
= path
;
268 const char *end
= internal_strchrnul(beg
, ':');
269 uptr prefix_len
= end
- beg
;
270 if (prefix_len
+ name_len
+ 2 <= kMaxPathLength
) {
271 internal_memcpy(buffer
.data(), beg
, prefix_len
);
272 buffer
[prefix_len
] = '/';
273 internal_memcpy(&buffer
[prefix_len
+ 1], name
, name_len
);
274 buffer
[prefix_len
+ 1 + name_len
] = '\0';
275 if (FileExists(buffer
.data()))
276 return internal_strdup(buffer
.data());
278 if (*end
== '\0') break;
284 void MaybeOpenReportFile() {
285 if (!log_to_file
) return;
286 uptr pid
= internal_getpid();
287 // If in tracer, use the parent's file.
288 if (pid
== stoptheworld_tracer_pid
)
289 pid
= stoptheworld_tracer_ppid
;
290 if (report_fd_pid
== pid
) return;
291 InternalScopedBuffer
<char> report_path_full(4096);
292 internal_snprintf(report_path_full
.data(), report_path_full
.size(),
293 "%s.%zu", report_path_prefix
, pid
);
294 uptr openrv
= OpenFile(report_path_full
.data(), true);
295 if (internal_iserror(openrv
)) {
296 report_fd
= kStderrFd
;
298 Report("ERROR: Can't open file: %s\n", report_path_full
.data());
301 if (report_fd
!= kInvalidFd
) {
302 // We're in the child. Close the parent's log.
303 internal_close(report_fd
);
309 void RawWrite(const char *buffer
) {
310 static const char *kRawWriteError
=
311 "RawWrite can't output requested buffer!\n";
312 uptr length
= (uptr
)internal_strlen(buffer
);
313 MaybeOpenReportFile();
314 if (length
!= internal_write(report_fd
, buffer
, length
)) {
315 internal_write(report_fd
, kRawWriteError
, internal_strlen(kRawWriteError
));
320 bool GetCodeRangeForFile(const char *module
, uptr
*start
, uptr
*end
) {
321 uptr s
, e
, off
, prot
;
322 InternalScopedString
buff(4096);
323 MemoryMappingLayout
proc_maps(/*cache_enabled*/false);
324 while (proc_maps
.Next(&s
, &e
, &off
, buff
.data(), buff
.size(), &prot
)) {
325 if ((prot
& MemoryMappingLayout::kProtectionExecute
) != 0
326 && internal_strcmp(module
, buff
.data()) == 0) {
335 } // namespace __sanitizer
337 #endif // SANITIZER_POSIX