//===-- sanitizer_posix.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"

#if SANITIZER_POSIX

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>
#if SANITIZER_FREEBSD
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
// NOTE: the guard matters — defining MAP_NORESERVE to 0 unconditionally would
// clobber the real flag on Linux and silently disable noreserve mappings.
#undef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif  // SANITIZER_FREEBSD
37 namespace __sanitizer
{
39 // ------------- sanitizer_common.h
40 uptr
GetMmapGranularity() {
// True when `err` indicates the kernel ran out of memory (ENOMEM).
bool ErrorIsOOM(error_t err) { return ENOMEM == err; }
46 void *MmapOrDie(uptr size
, const char *mem_type
, bool raw_report
) {
47 size
= RoundUpTo(size
, GetPageSizeCached());
48 uptr res
= MmapNamed(nullptr, size
, PROT_READ
| PROT_WRITE
,
49 MAP_PRIVATE
| MAP_ANON
, mem_type
);
51 if (UNLIKELY(internal_iserror(res
, &reserrno
)))
52 ReportMmapFailureAndDie(size
, mem_type
, "allocate", reserrno
, raw_report
);
53 IncreaseTotalMmap(size
);
57 void UnmapOrDie(void *addr
, uptr size
) {
58 if (!addr
|| !size
) return;
59 uptr res
= internal_munmap(addr
, size
);
61 if (UNLIKELY(internal_iserror(res
, &reserrno
)))
62 ReportMunmapFailureAndDie(addr
, size
, reserrno
);
63 DecreaseTotalMmap(size
);
66 void *MmapOrDieOnFatalError(uptr size
, const char *mem_type
) {
67 size
= RoundUpTo(size
, GetPageSizeCached());
68 uptr res
= MmapNamed(nullptr, size
, PROT_READ
| PROT_WRITE
,
69 MAP_PRIVATE
| MAP_ANON
, mem_type
);
71 if (UNLIKELY(internal_iserror(res
, &reserrno
))) {
72 if (reserrno
== ENOMEM
)
74 ReportMmapFailureAndDie(size
, mem_type
, "allocate", reserrno
);
76 IncreaseTotalMmap(size
);
80 // We want to map a chunk of address space aligned to 'alignment'.
81 // We do it by mapping a bit more and then unmapping redundant pieces.
82 // We probably can do it with fewer syscalls in some OS-dependent way.
83 void *MmapAlignedOrDieOnFatalError(uptr size
, uptr alignment
,
84 const char *mem_type
) {
85 CHECK(IsPowerOfTwo(size
));
86 CHECK(IsPowerOfTwo(alignment
));
87 uptr map_size
= size
+ alignment
;
88 // mmap maps entire pages and rounds up map_size needs to be a an integral
90 // We need to be aware of this size for calculating end and for unmapping
91 // fragments before and after the alignment region.
92 map_size
= RoundUpTo(map_size
, GetPageSizeCached());
93 uptr map_res
= (uptr
)MmapOrDieOnFatalError(map_size
, mem_type
);
94 if (UNLIKELY(!map_res
))
97 if (!IsAligned(res
, alignment
)) {
98 res
= (map_res
+ alignment
- 1) & ~(alignment
- 1);
99 UnmapOrDie((void*)map_res
, res
- map_res
);
101 uptr map_end
= map_res
+ map_size
;
102 uptr end
= res
+ size
;
103 end
= RoundUpTo(end
, GetPageSizeCached());
104 if (end
!= map_end
) {
105 CHECK_LT(end
, map_end
);
106 UnmapOrDie((void*)end
, map_end
- end
);
111 void *MmapNoReserveOrDie(uptr size
, const char *mem_type
) {
112 size
= RoundUpTo(size
, GetPageSizeCached());
113 uptr p
= MmapNamed(nullptr, size
, PROT_READ
| PROT_WRITE
,
114 MAP_PRIVATE
| MAP_ANON
| MAP_NORESERVE
, mem_type
);
116 if (UNLIKELY(internal_iserror(p
, &reserrno
)))
117 ReportMmapFailureAndDie(size
, mem_type
, "allocate noreserve", reserrno
);
118 IncreaseTotalMmap(size
);
122 static void *MmapFixedImpl(uptr fixed_addr
, uptr size
, bool tolerate_enomem
,
124 size
= RoundUpTo(size
, GetPageSizeCached());
125 fixed_addr
= RoundDownTo(fixed_addr
, GetPageSizeCached());
126 uptr p
= MmapNamed((void *)fixed_addr
, size
, PROT_READ
| PROT_WRITE
,
127 MAP_PRIVATE
| MAP_ANON
| MAP_FIXED
, name
);
129 if (UNLIKELY(internal_iserror(p
, &reserrno
))) {
130 if (tolerate_enomem
&& reserrno
== ENOMEM
)
133 internal_snprintf(mem_type
, sizeof(mem_type
), "memory at address 0x%zx",
135 ReportMmapFailureAndDie(size
, mem_type
, "allocate", reserrno
);
137 IncreaseTotalMmap(size
);
141 void *MmapFixedOrDie(uptr fixed_addr
, uptr size
, const char *name
) {
142 return MmapFixedImpl(fixed_addr
, size
, false /*tolerate_enomem*/, name
);
145 void *MmapFixedOrDieOnFatalError(uptr fixed_addr
, uptr size
, const char *name
) {
146 return MmapFixedImpl(fixed_addr
, size
, true /*tolerate_enomem*/, name
);
149 bool MprotectNoAccess(uptr addr
, uptr size
) {
150 return 0 == internal_mprotect((void*)addr
, size
, PROT_NONE
);
153 bool MprotectReadOnly(uptr addr
, uptr size
) {
154 return 0 == internal_mprotect((void *)addr
, size
, PROT_READ
);
// Intentionally a no-op here; malloc zones are presumably a Mac-only concept
// handled by the Apple-specific implementation — confirm against that file.
void MprotectMallocZones(void *addr, int prot) {}
161 fd_t
OpenFile(const char *filename
, FileAccessMode mode
, error_t
*errno_p
) {
162 if (ShouldMockFailureToOpen(filename
))
166 case RdOnly
: flags
= O_RDONLY
; break;
167 case WrOnly
: flags
= O_WRONLY
| O_CREAT
| O_TRUNC
; break;
168 case RdWr
: flags
= O_RDWR
| O_CREAT
; break;
170 fd_t res
= internal_open(filename
, flags
, 0660);
171 if (internal_iserror(res
, errno_p
))
173 return ReserveStandardFds(res
);
176 void CloseFile(fd_t fd
) {
180 bool ReadFromFile(fd_t fd
, void *buff
, uptr buff_size
, uptr
*bytes_read
,
182 uptr res
= internal_read(fd
, buff
, buff_size
);
183 if (internal_iserror(res
, error_p
))
190 bool WriteToFile(fd_t fd
, const void *buff
, uptr buff_size
, uptr
*bytes_written
,
192 uptr res
= internal_write(fd
, buff
, buff_size
);
193 if (internal_iserror(res
, error_p
))
196 *bytes_written
= res
;
200 void *MapFileToMemory(const char *file_name
, uptr
*buff_size
) {
201 fd_t fd
= OpenFile(file_name
, RdOnly
);
202 CHECK(fd
!= kInvalidFd
);
203 uptr fsize
= internal_filesize(fd
);
204 CHECK_NE(fsize
, (uptr
)-1);
206 *buff_size
= RoundUpTo(fsize
, GetPageSizeCached());
207 uptr map
= internal_mmap(nullptr, *buff_size
, PROT_READ
, MAP_PRIVATE
, fd
, 0);
208 return internal_iserror(map
) ? nullptr : (void *)map
;
211 void *MapWritableFileToMemory(void *addr
, uptr size
, fd_t fd
, OFF_T offset
) {
212 uptr flags
= MAP_SHARED
;
213 if (addr
) flags
|= MAP_FIXED
;
214 uptr p
= internal_mmap(addr
, size
, PROT_READ
| PROT_WRITE
, flags
, fd
, offset
);
216 if (internal_iserror(p
, &mmap_errno
)) {
217 Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n",
218 fd
, (long long)offset
, size
, p
, mmap_errno
);
224 static inline bool IntervalsAreSeparate(uptr start1
, uptr end1
,
225 uptr start2
, uptr end2
) {
226 CHECK(start1
<= end1
);
227 CHECK(start2
<= end2
);
228 return (end1
< start2
) || (end2
< start1
);
231 // FIXME: this is thread-unsafe, but should not cause problems most of the time.
232 // When the shadow is mapped only a single thread usually exists (plus maybe
233 // several worker threads on Mac, which aren't expected to map big chunks of
235 bool MemoryRangeIsAvailable(uptr range_start
, uptr range_end
) {
236 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
237 if (proc_maps
.Error())
238 return true; // and hope for the best
239 MemoryMappedSegment segment
;
240 while (proc_maps
.Next(&segment
)) {
241 if (segment
.start
== segment
.end
) continue; // Empty range.
242 CHECK_NE(0, segment
.end
);
243 if (!IntervalsAreSeparate(segment
.start
, segment
.end
- 1, range_start
,
251 void DumpProcessMap() {
252 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
253 const sptr kBufSize
= 4095;
254 char *filename
= (char*)MmapOrDie(kBufSize
, __func__
);
255 MemoryMappedSegment
segment(filename
, kBufSize
);
256 Report("Process memory map follows:\n");
257 while (proc_maps
.Next(&segment
)) {
258 Printf("\t%p-%p\t%s\n", (void *)segment
.start
, (void *)segment
.end
,
261 Report("End of process memory map.\n");
262 UnmapOrDie(filename
, kBufSize
);
266 const char *GetPwd() {
267 return GetEnv("PWD");
// POSIX uses '/' as the only path separator.
bool IsPathSeparator(const char c) {
  return c == '/';
}
274 bool IsAbsolutePath(const char *path
) {
275 return path
!= nullptr && IsPathSeparator(path
[0]);
278 void ReportFile::Write(const char *buffer
, uptr length
) {
281 internal_write(fd
, buffer
, length
);
284 bool GetCodeRangeForFile(const char *module
, uptr
*start
, uptr
*end
) {
285 MemoryMappingLayout
proc_maps(/*cache_enabled*/false);
286 InternalMmapVector
<char> buff(kMaxPathLength
);
287 MemoryMappedSegment
segment(buff
.data(), buff
.size());
288 while (proc_maps
.Next(&segment
)) {
289 if (segment
.IsExecutable() &&
290 internal_strcmp(module
, segment
.filename
) == 0) {
291 *start
= segment
.start
;
299 uptr
SignalContext::GetAddress() const {
300 auto si
= static_cast<const siginfo_t
*>(siginfo
);
301 return (uptr
)si
->si_addr
;
304 bool SignalContext::IsMemoryAccess() const {
305 auto si
= static_cast<const siginfo_t
*>(siginfo
);
306 return si
->si_signo
== SIGSEGV
|| si
->si_signo
== SIGBUS
;
309 int SignalContext::GetType() const {
310 return static_cast<const siginfo_t
*>(siginfo
)->si_signo
;
313 const char *SignalContext::Describe() const {
328 return "UNKNOWN SIGNAL";
331 fd_t
ReserveStandardFds(fd_t fd
) {
336 internal_memset(used
, 0, sizeof(used
));
339 fd
= internal_dup(fd
);
341 for (int i
= 0; i
<= 2; ++i
)
347 bool ShouldMockFailureToOpen(const char *path
) {
348 return common_flags()->test_only_emulate_no_memorymap
&&
349 internal_strncmp(path
, "/proc/", 6) == 0;
352 #if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
353 int GetNamedMappingFd(const char *name
, uptr size
, int *flags
) {
354 if (!common_flags()->decorate_proc_maps
|| !name
)
357 CHECK(internal_strlen(name
) < sizeof(shmname
) - 10);
358 internal_snprintf(shmname
, sizeof(shmname
), "/dev/shm/%zu [%s]",
359 internal_getpid(), name
);
361 #if defined(O_CLOEXEC)
362 o_cloexec
= O_CLOEXEC
;
364 int fd
= ReserveStandardFds(
365 internal_open(shmname
, O_RDWR
| O_CREAT
| O_TRUNC
| o_cloexec
, S_IRWXU
));
367 int res
= internal_ftruncate(fd
, size
);
368 #if !defined(O_CLOEXEC)
369 res
= fcntl(fd
, F_SETFD
, FD_CLOEXEC
);
373 res
= internal_unlink(shmname
);
375 *flags
&= ~(MAP_ANON
| MAP_ANONYMOUS
);
379 int GetNamedMappingFd(const char *name
, uptr size
, int *flags
) {
384 #if SANITIZER_ANDROID
385 #define PR_SET_VMA 0x53564d41
386 #define PR_SET_VMA_ANON_NAME 0
387 void DecorateMapping(uptr addr
, uptr size
, const char *name
) {
388 if (!common_flags()->decorate_proc_maps
|| !name
)
390 internal_prctl(PR_SET_VMA
, PR_SET_VMA_ANON_NAME
, addr
, size
, (uptr
)name
);
393 void DecorateMapping(uptr addr
, uptr size
, const char *name
) {
397 uptr
MmapNamed(void *addr
, uptr length
, int prot
, int flags
, const char *name
) {
398 int fd
= GetNamedMappingFd(name
, length
, &flags
);
399 uptr res
= internal_mmap(addr
, length
, prot
, flags
, fd
, 0);
400 if (!internal_iserror(res
))
401 DecorateMapping(res
, length
, name
);
406 } // namespace __sanitizer
408 #endif // SANITIZER_POSIX