//===-- sanitizer_fuchsia.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <zircon/utc.h>

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

u64 NanoTime() {
  zx_handle_t utc_clock = _zx_utc_reference_get();
  CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
  zx_time_t time;
  zx_status_t status = _zx_clock_read(utc_clock, &time);
  CHECK_EQ(status, ZX_OK);
  return time;
}

u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }

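// Zircon has no getpid(); the kernel object id (koid) of the process's own
// handle serves as the process identifier below.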
uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void InitializePlatformEarly() {}
void MaybeReexec() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

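// BlockingMutex below uses the classic three-state futex protocol:
// MtxLocked means locked with no waiters, MtxSleeping means locked with
// possible sleepers, so Unlock only pays for a futex wake when the state it
// swapped out was MtxSleeping.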
BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status =
        _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
                       ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

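// Shadow bounds come from Fuchsia's sanitizer runtime interface
// (__sanitizer_shadow_bounds, declared in <zircon/sanitizer.h>); they are
// cached in this global for use elsewhere in the runtime.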
sanitizer_shadow_bounds_t ShadowBounds;

uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

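// Zircon has no anonymous mmap: the equivalent is creating a VMO and mapping
// it into the root VMAR. Closing the VMO handle right after mapping is fine;
// the mapping itself keeps the memory alive.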
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

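// Address ranges are reserved by allocating a child VMAR rather than by
// mapping PROT_NONE pages as on POSIX; later Map/MapOrDie calls place
// mappings at fixed offsets within that VMAR.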
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
      0, init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}

static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  DCHECK_GE(base + size_, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name ? name : name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
                                    const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name ? name : name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size. Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
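  // For example, with 4 KiB pages, a 16 KiB allocation aligned to 16 KiB maps
  // 32 KiB below; any 32 KiB window contains a 16 KiB-aligned 16 KiB region,
  // which is then remapped from offset 0 of the VMO while the slack on either
  // side is unmapped.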
  size_t map_size = size + alignment;
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, map_size, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            _zx_vmar_root_self(),
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
}

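// Probes readability by copying the range into a scratch VMO: if any source
// page is inaccessible, zx_vmo_write reports an error status instead of
// faulting this process.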
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}

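// There is no filesystem access here: "files" are configuration VMOs handed
// to the process, looked up by name via __sanitizer_get_configuration, and
// the returned buffer is a read-only, page-rounded mapping of that VMO.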
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
                            map_size, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

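// The Zircon debug log is record-oriented rather than stream-oriented, so
// RawWrite accumulates output in a small thread-local buffer and hands
// __sanitizer_log_write one complete line (or one full buffer) at a time,
// carrying any partial trailing line over to the next call.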
void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = cur;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

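// zx_cprng_draw() always succeeds for requests up to ZX_CPRNG_DRAW_MAX_LEN
// bytes and does not block, so the `blocking` flag can be ignored.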
bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() {
  return zx_system_get_num_cpus();
}

uptr GetRSS() { UNIMPLEMENTED(); }

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

using namespace __sanitizer;

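// What follows is Fuchsia's sanitizer startup ABI (see <zircon/sanitizer.h>):
// libc invokes __sanitizer_startup_hook early in process startup, before
// main, passing argv/envp and the main thread's stack bounds.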
extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA