//===-- sanitizer_mac.cc --------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements mac-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
13 #include "sanitizer_platform.h"
16 // Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
17 // the clients will most certainly use 64-bit ones as well.
18 #ifndef _DARWIN_USE_64_BIT_INODE
19 #define _DARWIN_USE_64_BIT_INODE 1
23 #include "sanitizer_common.h"
24 #include "sanitizer_internal_defs.h"
25 #include "sanitizer_libc.h"
26 #include "sanitizer_placement_new.h"
27 #include "sanitizer_procmaps.h"
29 #include <crt_externs.h> // for _NSGetEnviron
31 #include <mach-o/dyld.h>
32 #include <mach-o/loader.h>
36 #include <sys/resource.h>
38 #include <sys/types.h>
40 #include <libkern/OSAtomic.h>
43 namespace __sanitizer
{
45 #include "sanitizer_syscall_generic.inc"
47 // ---------------------- sanitizer_libc.h
48 uptr
internal_mmap(void *addr
, size_t length
, int prot
, int flags
,
50 return (uptr
)mmap(addr
, length
, prot
, flags
, fd
, offset
);
53 uptr
internal_munmap(void *addr
, uptr length
) {
54 return munmap(addr
, length
);
57 uptr
internal_close(fd_t fd
) {
61 uptr
internal_open(const char *filename
, int flags
) {
62 return open(filename
, flags
);
65 uptr
internal_open(const char *filename
, int flags
, u32 mode
) {
66 return open(filename
, flags
, mode
);
69 uptr
OpenFile(const char *filename
, bool write
) {
70 return internal_open(filename
,
71 write
? O_WRONLY
| O_CREAT
: O_RDONLY
, 0660);
74 uptr
internal_read(fd_t fd
, void *buf
, uptr count
) {
75 return read(fd
, buf
, count
);
78 uptr
internal_write(fd_t fd
, const void *buf
, uptr count
) {
79 return write(fd
, buf
, count
);
82 uptr
internal_stat(const char *path
, void *buf
) {
83 return stat(path
, (struct stat
*)buf
);
86 uptr
internal_lstat(const char *path
, void *buf
) {
87 return lstat(path
, (struct stat
*)buf
);
90 uptr
internal_fstat(fd_t fd
, void *buf
) {
91 return fstat(fd
, (struct stat
*)buf
);
94 uptr
internal_filesize(fd_t fd
) {
96 if (internal_fstat(fd
, &st
))
98 return (uptr
)st
.st_size
;
101 uptr
internal_dup2(int oldfd
, int newfd
) {
102 return dup2(oldfd
, newfd
);
105 uptr
internal_readlink(const char *path
, char *buf
, uptr bufsize
) {
106 return readlink(path
, buf
, bufsize
);
109 uptr
internal_sched_yield() {
110 return sched_yield();
113 void internal__exit(int exitcode
) {
117 uptr
internal_getpid() {
121 // ----------------- sanitizer_common.h
122 bool FileExists(const char *filename
) {
124 if (stat(filename
, &st
))
126 // Sanity check: filename is a regular file.
127 return S_ISREG(st
.st_mode
);
131 return reinterpret_cast<uptr
>(pthread_self());
134 void GetThreadStackTopAndBottom(bool at_initialization
, uptr
*stack_top
,
135 uptr
*stack_bottom
) {
138 uptr stacksize
= pthread_get_stacksize_np(pthread_self());
139 void *stackaddr
= pthread_get_stackaddr_np(pthread_self());
140 *stack_top
= (uptr
)stackaddr
;
141 *stack_bottom
= *stack_top
- stacksize
;
144 const char *GetEnv(const char *name
) {
145 char ***env_ptr
= _NSGetEnviron();
147 Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is "
148 "called after libSystem_initializer().\n");
151 char **environ
= *env_ptr
;
153 uptr name_len
= internal_strlen(name
);
154 while (*environ
!= 0) {
155 uptr len
= internal_strlen(*environ
);
156 if (len
> name_len
) {
157 const char *p
= *environ
;
158 if (!internal_memcmp(p
, name
, name_len
) &&
159 p
[name_len
] == '=') { // Match.
160 return *environ
+ name_len
+ 1; // String starting after =.
172 void PrepareForSandboxing() {
173 // Nothing here for now.
177 return sysconf(_SC_PAGESIZE
);
180 // ----------------- sanitizer_procmaps.h
182 MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled
) {
186 MemoryMappingLayout::~MemoryMappingLayout() {
189 // More information about Mach-O headers can be found in mach-o/loader.h
190 // Each Mach-O image has a header (mach_header or mach_header_64) starting with
191 // a magic number, and a list of linker load commands directly following the
193 // A load command is at least two 32-bit words: the command type and the
194 // command size in bytes. We're interested only in segment load commands
195 // (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
196 // into the task's address space.
197 // The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
198 // segment_command_64 correspond to the memory address, memory size and the
199 // file offset of the current memory segment.
200 // Because these fields are taken from the images as is, one needs to add
201 // _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
203 void MemoryMappingLayout::Reset() {
204 // Count down from the top.
205 // TODO(glider): as per man 3 dyld, iterating over the headers with
206 // _dyld_image_count is thread-unsafe. We need to register callbacks for
207 // adding and removing images which will invalidate the MemoryMappingLayout
209 current_image_
= _dyld_image_count();
210 current_load_cmd_count_
= -1;
211 current_load_cmd_addr_
= 0;
213 current_filetype_
= 0;
217 void MemoryMappingLayout::CacheMemoryMappings() {
218 // No-op on Mac for now.
221 void MemoryMappingLayout::LoadFromCache() {
222 // No-op on Mac for now.
225 // Next and NextSegmentLoad were inspired by base/sysinfo.cc in
226 // Google Perftools, http://code.google.com/p/google-perftools.
// NextSegmentLoad scans the current image for the next segment load command
// and returns the start and end addresses and file offset of the corresponding
// segment.
// Note that the segment addresses are not necessarily sorted.
232 template<u32 kLCSegment
, typename SegmentCommand
>
233 bool MemoryMappingLayout::NextSegmentLoad(
234 uptr
*start
, uptr
*end
, uptr
*offset
,
235 char filename
[], uptr filename_size
, uptr
*protection
) {
238 const char* lc
= current_load_cmd_addr_
;
239 current_load_cmd_addr_
+= ((const load_command
*)lc
)->cmdsize
;
240 if (((const load_command
*)lc
)->cmd
== kLCSegment
) {
241 const sptr dlloff
= _dyld_get_image_vmaddr_slide(current_image_
);
242 const SegmentCommand
* sc
= (const SegmentCommand
*)lc
;
243 if (start
) *start
= sc
->vmaddr
+ dlloff
;
244 if (end
) *end
= sc
->vmaddr
+ sc
->vmsize
+ dlloff
;
246 if (current_filetype_
== /*MH_EXECUTE*/ 0x2) {
247 *offset
= sc
->vmaddr
;
249 *offset
= sc
->fileoff
;
253 internal_strncpy(filename
, _dyld_get_image_name(current_image_
),
261 bool MemoryMappingLayout::Next(uptr
*start
, uptr
*end
, uptr
*offset
,
262 char filename
[], uptr filename_size
,
264 for (; current_image_
>= 0; current_image_
--) {
265 const mach_header
* hdr
= _dyld_get_image_header(current_image_
);
267 if (current_load_cmd_count_
< 0) {
268 // Set up for this image;
269 current_load_cmd_count_
= hdr
->ncmds
;
270 current_magic_
= hdr
->magic
;
271 current_filetype_
= hdr
->filetype
;
272 switch (current_magic_
) {
275 current_load_cmd_addr_
= (char*)hdr
+ sizeof(mach_header_64
);
280 current_load_cmd_addr_
= (char*)hdr
+ sizeof(mach_header
);
289 for (; current_load_cmd_count_
>= 0; current_load_cmd_count_
--) {
290 switch (current_magic_
) {
291 // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
294 if (NextSegmentLoad
<LC_SEGMENT_64
, struct segment_command_64
>(
295 start
, end
, offset
, filename
, filename_size
, protection
))
301 if (NextSegmentLoad
<LC_SEGMENT
, struct segment_command
>(
302 start
, end
, offset
, filename
, filename_size
, protection
))
308 // If we get here, no more load_cmd's in this image talk about
309 // segments. Go on to the next image.
314 bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr
, uptr
*offset
,
318 return IterateForObjectNameAndOffset(addr
, offset
, filename
, filename_size
,
322 BlockingMutex::BlockingMutex(LinkerInitialized
) {
323 // We assume that OS_SPINLOCK_INIT is zero
326 BlockingMutex::BlockingMutex() {
327 internal_memset(this, 0, sizeof(*this));
330 void BlockingMutex::Lock() {
331 CHECK(sizeof(OSSpinLock
) <= sizeof(opaque_storage_
));
332 CHECK_EQ(OS_SPINLOCK_INIT
, 0);
333 CHECK_NE(owner_
, (uptr
)pthread_self());
334 OSSpinLockLock((OSSpinLock
*)&opaque_storage_
);
336 owner_
= (uptr
)pthread_self();
339 void BlockingMutex::Unlock() {
340 CHECK(owner_
== (uptr
)pthread_self());
342 OSSpinLockUnlock((OSSpinLock
*)&opaque_storage_
);
345 void BlockingMutex::CheckLocked() {
346 CHECK_EQ((uptr
)pthread_self(), owner_
);
360 void GetThreadStackAndTls(bool main
, uptr
*stk_addr
, uptr
*stk_size
,
361 uptr
*tls_addr
, uptr
*tls_size
) {
363 uptr stack_top
, stack_bottom
;
364 GetThreadStackTopAndBottom(main
, &stack_top
, &stack_bottom
);
365 *stk_addr
= stack_bottom
;
366 *stk_size
= stack_top
- stack_bottom
;
377 uptr
GetListOfModules(LoadedModule
*modules
, uptr max_modules
,
378 string_predicate_t filter
) {
379 MemoryMappingLayout
memory_mapping(false);
380 memory_mapping
.Reset();
381 uptr cur_beg
, cur_end
, cur_offset
;
382 InternalScopedBuffer
<char> module_name(kMaxPathLength
);
385 n_modules
< max_modules
&&
386 memory_mapping
.Next(&cur_beg
, &cur_end
, &cur_offset
,
387 module_name
.data(), module_name
.size(), 0);
389 const char *cur_name
= module_name
.data();
390 if (cur_name
[0] == '\0')
392 if (filter
&& !filter(cur_name
))
394 LoadedModule
*cur_module
= 0;
396 0 == internal_strcmp(cur_name
, modules
[n_modules
- 1].full_name())) {
397 cur_module
= &modules
[n_modules
- 1];
399 void *mem
= &modules
[n_modules
];
400 cur_module
= new(mem
) LoadedModule(cur_name
, cur_beg
);
403 cur_module
->addAddressRange(cur_beg
, cur_end
);
408 } // namespace __sanitizer
410 #endif // SANITIZER_MAC