//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

#if defined(__powerpc__) || defined(__powerpc64__)
const uptr kCacheLineSize = 128;
#else
const uptr kCacheLineSize = 64;
#endif

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

extern uptr PageSizeCached;
uptr GetPageSize();
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();

// Threads
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
                         const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size,
                        const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map an aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out-of-memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
// inaccessible memory range.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
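
// Usage sketch (illustrative, not part of the original interface): allocate a
// scratch region and release it when done. "example scratch" is a hypothetical
// mem_type tag used only for error reporting.
//   void *mem = MmapOrDie(kMaxPathLength, "example scratch");
//   // ... use the region ...
//   UnmapOrDie(mem, kMaxPathLength);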

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void NoHugePagesInRegion(uptr addr, uptr length);
void DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
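
// Illustrative callback sketch (hypothetical tool code, not part of this
// header): sum the resident memory of all reported ranges into stats[0].
//   static void MyMemoryProfileCb(uptr start, uptr rss, bool file,
//                                 uptr *stats, uptr stats_size) {
//     if (stats_size > 0) stats[0] += rss;
//   }
//   uptr stats[1] = {};
//   GetMemoryProfile(MyMemoryProfileCb, stats, 1);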

// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
// InternalAlloc is made libc-free.
template <typename T>
class InternalScopedBuffer {
 public:
  explicit InternalScopedBuffer(uptr cnt) {
    cnt_ = cnt;
    ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
  }
  ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); }
  T &operator[](uptr i) { return ptr_[i]; }
  T *data() { return ptr_; }
  uptr size() { return cnt_ * sizeof(T); }

 private:
  T *ptr_;
  uptr cnt_;
  // Disallow copies and moves.
  InternalScopedBuffer(const InternalScopedBuffer &) = delete;
  InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
  InternalScopedBuffer(InternalScopedBuffer &&) = delete;
  InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
};
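
// Usage sketch (illustrative): a heap-backed buffer instead of a large stack
// array, unmapped automatically at end of scope.
//   InternalScopedBuffer<char> filename(kMaxPathLength);
//   ReadBinaryNameCached(filename.data(), filename.size());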

class InternalScopedString : public InternalScopedBuffer<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalScopedBuffer<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  void append(const char *format, ...);

 private:
  uptr length_;
};

// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// a constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  char *allocated_end_;
  char *allocated_current_;
};
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
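
// Illustrative callback sketch (hypothetical tool code): observe every chunk
// handed out by a LowLevelAllocator, e.g. to update tool metadata.
//   static void OnLowLevelAllocate(uptr ptr, uptr size) {
//     // Tool-specific bookkeeping for [ptr, ptr + size).
//   }
//   SetLowLevelAllocateCallback(OnLowLevelAllocate);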

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                     \
  do {                                                          \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__);      \
  } while (0)
#define VPrintf(level, ...)                                     \
  do {                                                          \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__);      \
  } while (0)
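
// Usage sketch: print only when the tool runs at verbosity >= 2; shadow_beg
// here is a hypothetical variable.
//   VReport(2, "Mapped shadow region at %p\n", (void *)shadow_beg);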

// Can be used to prevent mixing error reports from different sanitizers.
// FIXME: Replace with ScopedErrorReportLock and hide.
extern StaticSpinMutex CommonSanitizerReportMutex;

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);
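
// Usage sketch (illustrative): read a proc file, then release the mmaped
// buffer.
//   char *buff;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, &read_len)) {
//     // ... use buff[0 .. read_len) ...
//     UnmapOrDie(buff, buff_size);
//   }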

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
void PrintModuleMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
bool StackSizeIsUnlimited();
uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
void SortArray(u32 *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);

void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
bool SanitizerSetThreadName(const char *name);
// Get the name of the current thread (no more than max_len bytes),
// return true on success. name should have space for at least max_len+1 bytes.
bool SanitizerGetThreadName(char *name, int max_len);

// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do a tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly advised to set up all callbacks during initialization, while there
// is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// The callback is invoked if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if RSS drops back below the limit
// (exceeded==false).
// The callback should be registered once at tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
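
// Illustrative callback sketch (hypothetical tool code):
//   static void OnRssLimit(bool exceeded) {
//     if (exceeded) Report("WARNING: soft RSS limit exceeded\n");
//   }
//   SetSoftRssLimitExceededCallback(OnRssLimit);  // Once, at tool init.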

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want the summary to be too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but constructs error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing the top stack trace
// frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif

INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
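
// Worked examples for the helpers above:
//   RoundUpToPowerOfTwo(5) == 8
//   RoundUpTo(13, 8) == 16, RoundDownTo(13, 8) == 8
//   IsAligned(16, 8) == true, IsAligned(12, 8) == false
//   Log2(8) == 3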

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    capacity_ = Max(initial_capacity, (uptr)1);
    size_ = 0;
    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
  }
  void Destroy() {
    UnmapOrDie(data_, capacity_ * sizeof(T));
  }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity_);
    if (size_ == capacity_) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Resize(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const {
    return capacity_;
  }
  void resize(uptr new_size) {
    Resize(new_size);
    if (new_size > size_) {
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

 private:
  void Resize(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
                                 "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    T *old_data = data_;
    data_ = new_data;
    UnmapOrDie(old_data, capacity_ * sizeof(T));
    capacity_ = new_capacity;
  }

  T *data_;
  uptr capacity_;
  uptr size_;
};

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  explicit InternalMmapVector(uptr initial_capacity) {
    InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
 private:
  // Disallow evil constructors.
  InternalMmapVector(const InternalMmapVector &);
  void operator=(const InternalMmapVector &);
};
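
// Usage sketch (illustrative): a growable, mmap-backed array of addresses.
//   InternalMmapVector<uptr> addrs(/*initial_capacity=*/16);
//   addrs.push_back(pc);  // pc is a hypothetical variable.
//   for (uptr *it = addrs.begin(); it != addrs.end(); ++it) { /* ... */ }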

// HeapSort for arrays and InternalMmapVector.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp((*v)[p], (*v)[j]))
        Swap((*v)[j], (*v)[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap((*v)[0], (*v)[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp((*v)[max_ind], (*v)[left]))
        max_ind = left;
      if (right < i && comp((*v)[max_ind], (*v)[right]))
        max_ind = right;
      if (max_ind != j)
        Swap((*v)[j], (*v)[max_ind]);
      else
        break;
    }
  }
}
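
// Usage sketch (illustrative): sort the hypothetical vector from the example
// above in ascending order.
//   static bool UptrLess(uptr a, uptr b) { return a < b; }
//   InternalSort(&addrs, addrs.size(), UptrLess);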

// Works like std::lower_bound: finds the first element that is not less
// than the given value.
template <class Container, class Value, class Compare>
uptr InternalLowerBound(const Container &v, uptr first, uptr last,
                        const Value &val, Compare comp) {
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
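
// Usage sketch (illustrative): find the insertion point for a value in the
// sorted vector from the examples above.
//   uptr idx = InternalLowerBound(addrs, 0, addrs.size(), (uptr)pc, UptrLess);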

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64
};

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 16;
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void (*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
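
// Usage sketch: keep a hand-written zeroing loop from being turned into an
// actual memset call; mem and size are hypothetical variables.
//   for (uptr i = 0; i < size; i++) {
//     ((char *)mem)[i] = 0;
//     SanitizerBreakOptimization(mem);
//   }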

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
};

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag;
// indicates that the sanitizer allocator should not attempt to release memory
// to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);
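
// Usage sketch (illustrative): fetch a PRNG seed without risking a hang.
//   u64 seed = 0;
//   GetRandom(&seed, sizeof(seed), /*blocking=*/false);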

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}
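
// Usage sketch (illustrative): placement-new into a linker-initialized
// allocator; Foo is a hypothetical internal type.
//   static __sanitizer::LowLevelAllocator alloc;
//   Foo *f = new (alloc) Foo();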

#endif  // SANITIZER_COMMON_H