//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}
#if SANITIZER_ANDROID
inline uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif

uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();

int TgKill(pid_t pid, tid_t tid, int sig);
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
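// Illustrative usage sketch (not part of the interface; `Elem` and the tag
// string are hypothetical): allocations are tagged with a mem_type string that
// shows up in mmap-failure reports, and are released with UnmapOrDie:
//   Elem *buf = (Elem *)MmapOrDie(n_elems * sizeof(Elem), "MyToolBuffer");
//   ...
//   UnmapOrDie(buf, n_elems * sizeof(Elem));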
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map an aligned chunk of address space; size and alignment are powers of two.
// Dies on all errors except out-of-memory, in which case it returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);

// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
// shadow_size_bytes bytes on the right, which on Linux is mapped no-access.
// The high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end);

// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);

void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
};
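// Illustrative sketch only (the names below are hypothetical): a typical
// pattern is to reserve a large range up front and map pieces of it on demand:
//   ReservedAddressRange shadow_range;
//   uptr base = shadow_range.Init(kShadowSize, "shadow");  // kShadowSize assumed
//   uptr chunk = shadow_range.Map(base, GetPageSizeCached(), "shadow chunk");
//   ...
//   shadow_range.Unmap(chunk, GetPageSizeCached());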
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
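// A minimal sketch of a fill_profile_f callback, assuming the tool only wants
// to accumulate total RSS into stats[0] (the bucketing scheme is up to the
// tool and is not prescribed by this interface):
//   static void FillProfileCallback(uptr start, uptr rss, bool file,
//                                   uptr *stats, uptr stats_size) {
//     if (stats_size > 0) stats[0] += rss;
//   }
//   ...
//   uptr stats[1] = {};
//   GetMemoryProfile(FillProfileCallback, stats, 1);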
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
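// Illustrative sketch (the callback name is hypothetical): a tool can observe
// every chunk handed out by LowLevelAllocator, e.g. to poison or record it:
//   static void OnLowLevelAllocate(uptr ptr, uptr size) {
//     // e.g. record or poison the [ptr, ptr + size) region here.
//   }
//   ...
//   SetLowLevelAllocateCallback(OnLowLevelAllocate);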
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                   \
  do {                                                        \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__);    \
  } while (0)
#define VPrintf(level, ...)                                   \
  do {                                                        \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__);    \
  } while (0)
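// Usage sketch: both macros are no-ops unless the runtime verbosity (set via
// the common "verbosity" flag / SetVerbosity()) is at least `level`, e.g.:
//   VReport(1, "%s: shadow mapped at %p\n", SanitizerToolName, (void *)base);
//   VPrintf(2, "mapped %zd bytes\n", size);
// The `base` and `size` variables above are hypothetical.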
// Lock sanitizer error reporting and protect against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

void CheckMPROTECT();

bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

void NORETURN Abort();
void NORETURN Die();
void NORETURN CheckFailed(const char *file, int line, const char *cond, u64 v1,
                          u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
// Specific tools may override the behavior of the "Die" and "CheckFailed"
// functions to do a tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks are run in reverse registration order. Tools are
// strongly recommended to set up all callbacks during initialization, when
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);
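// Illustrative sketch (the callback name is hypothetical): a tool can flush
// its own buffers before the process dies on a fatal error:
//   static void FlushLogsOnDie() {
//     // flush tool-specific state here; must not allocate or die itself.
//   }
//   ...
//   AddDieCallback(FlushLogsOnDie);  // run (in reverse order) when Die() is called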
typedef void (*CheckFailedCallbackType)(const char *, int, const char *, u64,
                                        u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// The callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if RSS goes back down below the limit
// (exceeded==false).
// The callback should be registered once at tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want the summary to be too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing the top stack trace
// frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

void ReportMmapWriteExec(int prot);
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

inline bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}
inline uptr RoundUpToPowerOfTwo(uptr size) {
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
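// Worked examples of the helpers above:
//   RoundUpTo(17, 16)        == 32   // (17 + 15) & ~15
//   RoundDownTo(17, 16)      == 16
//   RoundUpToPowerOfTwo(17)  == 32
//   IsAligned(0x1000, 0x100) == true
//   Log2(32)                 == 5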
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T> T Min(T a, T b) { return a < b ? a : b; }
template <class T> T Max(T a, T b) { return a > b ? a : b; }
template <class T> void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}

inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template <typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    data_ = 0;
    capacity_bytes_ = 0;
    size_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  uptr size() const { return size_; }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};
template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}
template <typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
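// Usage sketch: InternalMmapVector is the runtime's stand-in for std::vector
// (POD element types only), e.g.:
//   InternalMmapVector<uptr> frames;
//   frames.push_back(pc);          // `pc` is a hypothetical value
//   for (uptr p : frames) { /* ... */ }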
class InternalScopedString : public InternalMmapVector<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalMmapVector<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void append(const char *format, ...);

 private:
  uptr length_;
};

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};
// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
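// Usage sketch: sorts in ascending order by default, or with a user-supplied
// comparator (the lambda below is illustrative):
//   uptr a[] = {3, 1, 2};
//   Sort(a, 3);                                          // 1 2 3
//   Sort(a, 3, [](uptr x, uptr y) { return x > y; });    // 3 2 1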
// Works like std::lower_bound: finds the first element that is not less
// than the given value.
template <class Container, class Value, class Compare>
uptr InternalLowerBound(const Container &v, uptr first, uptr last,
                        const Value &val, Compare comp) {
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
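// Example (illustrative): on a container already sorted with the same
// comparator, the returned index is the first position whose element is not
// less than `val`:
//   InternalMmapVector<uptr> v;  // assume it holds {1, 3, 3, 7}
//   uptr idx = InternalLowerBound(v, 0, v.size(), (uptr)3, CompareLess<uptr>());
//   // idx == 1; an index equal to v.size() means "all elements are smaller".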
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);
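// Usage sketch (error handling elided; the variable names are illustrative):
//   char *data;
//   uptr data_size, read_len;
//   if (ReadFileToBuffer("/proc/self/maps", &data, &data_size, &read_len)) {
//     // use data[0 .. read_len)
//     UnmapOrDie(data, data_size);
//   }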
// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchRISCV64:
      return "riscv64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 16;
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r"(arg) : "memory");
#endif
}
struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows us to distinguish between these cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // copying them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
//   auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif
struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};
// The default value for allocator_release_to_os_interval_ms common flag to
// indicate that sanitizer allocator should not attempt to release memory to OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}
template <typename T>
class ArrayRef {
 public:
  ArrayRef() {}
  ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}

  T *begin() { return begin_; }
  T *end() { return end_; }

 private:
  T *begin_ = nullptr;
  T *end_ = nullptr;
};

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {  // NOLINT
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H