//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {
struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;
extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}
#if SANITIZER_ANDROID
INLINE uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
INLINE uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
int TgKill(pid_t pid, tid_t tid, int sig);
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates the out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates the out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map an aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out-of-memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
// inaccessible memory range.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);
// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
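
// Example usage (illustrative sketch; ThreadRegistryAlloc and ThreadContext
// are hypothetical names). Instances live in zero-initialized static storage
// and the caller provides its own locking; the placement operator new is
// declared at the end of this header.
//   static LowLevelAllocator ThreadRegistryAlloc;  // linker initialized
//   ...
//   void *mem = ThreadRegistryAlloc.Allocate(sizeof(ThreadContext));
//   // or, with the placement new below:
//   // ThreadContext *ctx = new (ThreadRegistryAlloc) ThreadContext(tid);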
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
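
// Example usage (illustrative): gate diagnostics on the common verbosity
// flag; the message below is printed only when verbosity >= 2.
//   VReport(2, "Mapped shadow at %p (%zu bytes)\n", (void *)addr, size);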
// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};
extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
void PrintModuleMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
void CheckMPROTECT();

bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks will be run in the reverse order of registration.
// Tools are strongly recommended to set up all callbacks during
// initialization, when there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);
typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
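
// Example callback (illustrative sketch, not part of the original header):
//   static void OnSoftRssLimit(bool exceeded) {
//     if (exceeded)
//       Report("WARNING: soft RSS limit exceeded\n");
//   }
//   ...
//   SetSoftRssLimitExceededCallback(OnSoftRssLimit);  // once, at init time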
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);
// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
// We don't want the summary to be too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
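
// Example usage (illustrative): a typical report path ends with one of the
// overloads above. With a StackTrace *stack already unwound:
//   ReportErrorSummary("heap-buffer-overflow", stack);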
void ReportMmapWriteExec(int prot);
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif
INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}
INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}
INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}
INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}
INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}
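
// Worked examples (illustrative): with a power-of-two boundary of 8,
//   RoundUpTo(13, 8) == 16, RoundDownTo(13, 8) == 8,
//   IsAligned(16, 8) == true, IsAligned(13, 8) == false.
// The bit trick requires a power-of-two boundary, which is why RoundUpTo
// verifies it with RAW_CHECK(IsPowerOfTwo(boundary)).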
INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    data_ = 0;
    capacity_bytes_ = 0;
    size_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      if (new_size > capacity()) reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};
template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}
template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(1); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
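
// Example usage (illustrative sketch, not part of the original header):
//   InternalMmapVector<uptr> pcs;
//   pcs.push_back(0x4005d0);
//   pcs.push_back(0x4006a8);
//   for (uptr pc : pcs) Printf("%p\n", (void *)pc);
// The default constructor requests capacity for one element, but storage
// comes from MmapOrDie in page-sized chunks, so even a tiny vector holds at
// least one page; see the overhead warning on InternalMmapVectorNoCtor.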
class InternalScopedString : public InternalMmapVector<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalMmapVector<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  void append(const char *format, ...);

 private:
  uptr length_;
};
template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};
// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
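
// Example usage (illustrative): sort ascending with the default comparator,
// or pass a custom functor for descending order.
//   InternalMmapVector<uptr> sizes;
//   ...
//   Sort(sizes.data(), sizes.size());
//   struct Greater { bool operator()(uptr a, uptr b) const { return a > b; } };
//   Sort(sizes.data(), sizes.size(), Greater());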
// Works like std::lower_bound: finds the first element that is not less
// than the given value.
template <class Container, class Value, class Compare>
uptr InternalLowerBound(const Container &v, uptr first, uptr last,
                        const Value &val, Compare comp) {
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
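
// Example usage (illustrative): binary-search a vector already sorted with a
// comparator consistent with comp; target is the value being looked up.
//   InternalMmapVector<uptr> v;  // sorted ascending
//   ...
//   uptr idx = InternalLowerBound(v, 0, v.size(), target, CompareLess<uptr>());
//   bool found = idx < v.size() && v[idx] == target;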
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmapped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmapped and stored in '*buff'.
// The size of the mmapped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);
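
// Example usage (illustrative sketch, not part of the original header):
//   char *buff;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, &read_len)) {
//     // buff[0..read_len) holds the file contents; the mapping spans
//     // buff_size bytes and must be released with UnmapOrDie.
//     UnmapOrDie(buff, buff_size);
//   }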
// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}
const uptr kModuleUUIDSize = 16;
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};
void WriteToSyslog(const char *buffer);
#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif
#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif
#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif
#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}
void *internal_start_thread(void (*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows distinguishing between these cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};
// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}
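
// Example usage (illustrative): release a temporary buffer on every return
// path from a function.
//   char *buff = (char *)MmapOrDie(size, "tmp");
//   auto buff_cleanup = at_scope_exit([&]{ UnmapOrDie(buff, size); });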
// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif
struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};
// The default value for allocator_release_to_os_interval_ms common flag to
// indicate that sanitizer allocator should not attempt to release memory to OS.
const s32 kReleaseToOSIntervalNever = -1;
void CheckNoDeepBind(const char *filename, int flag);
// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);
// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
INLINE u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}
}  // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {  // NOLINT
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H