//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif
namespace __sanitizer {

struct BufferedStackTrace;

const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;
extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}
extern uptr PageSizeCached;
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();

int TgKill(pid_t pid, tid_t tid, int sig);
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
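
// Illustrative usage of the raw mapping primitives above (a sketch, not part
// of the interface; `bytes_needed` is a made-up name for the example):
//   uptr size = RoundUpTo(bytes_needed, GetPageSizeCached());
//   void *p = MmapOrDie(size, "MyTool scratch buffer");
//   ...
//   UnmapOrDie(p, size);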
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map an aligned chunk of address space; size and alignment are powers of two.
// Dies on all errors except out of memory, in which case it returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);
// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. No-op if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
bool NoHugePagesInRegion(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
// A region of virtual address space reserved up front and mapped in pieces.
class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr Map(uptr fixed_addr, uptr size);
  uptr MapOrDie(uptr fixed_addr, uptr size);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
};
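
// Illustrative usage (a sketch, not from this file): reserve a region up
// front, then map fixed-address chunks out of it on demand.
//   ReservedAddressRange range;
//   uptr base = range.Init(kRegionSize, "MyTool region");
//   uptr chunk = range.MapOrDie(base, kChunkSize);
//   ...
//   range.Unmap(chunk, kChunkSize);
// `kRegionSize` and `kChunkSize` are placeholders for this sketch.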
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
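
// Sketch of a tool-specific callback (illustrative only; the aggregation
// policy shown here is made up for the example):
//   static void MyFillProfile(uptr start, uptr rss, bool file,
//                             /*out*/uptr *stats, uptr stats_size) {
//     if (stats_size > 0) stats[0] += rss;  // e.g. accumulate total RSS
//   }
//   ...
//   uptr stats[1] = {};
//   GetMemoryProfile(MyFillProfile, stats, ARRAY_SIZE(stats));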
// Simple low-level (mmap-based) allocator for internal use. It doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows a tool to register a callback for LowLevelAllocator allocations.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
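
// Illustrative registration (a sketch; the callback body is made up):
//   static void OnLowLevelAllocate(uptr ptr, uptr size) {
//     // e.g. poison or record the new region [ptr, ptr + size) here.
//   }
//   ...
//   SetLowLevelAllocateCallback(OnLowLevelAllocate);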
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                    \
  do {                                                         \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__);     \
  } while (0)
#define VPrintf(level, ...)                                    \
  do {                                                         \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__);     \
  } while (0)
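
// Example (illustrative): emit a message only when the tool runs with
// verbosity=1 or higher.
//   VReport(1, "%s: initialized\n", SanitizerToolName);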
// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};
extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
void PrintModuleMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
bool StackSizeIsUnlimited();
uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

void NORETURN Abort();
void NORETURN CheckFailed(const char *file, int line, const char *cond,
                          u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
// Specific tools may override the behavior of the "Die" and "CheckFailed"
// functions to do tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly encouraged to set up all callbacks during initialization, when
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want summaries to be too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing the top stack trace
// frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
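
// Illustrative call (a sketch, not from this file): a tool reporting a
// generic error with the current stack might do roughly
//   ReportErrorSummary("invalid-access", stack);
// where `stack` is a BufferedStackTrace* collected by the tool.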

void ReportMmapWriteExec(int prot);
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#ifdef _WIN64
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif
INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}
INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}
INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}
INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
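
// Worked example (illustrative): with a power-of-two boundary of 16,
//   RoundUpTo(40, 16)  == 48,   RoundDownTo(40, 16) == 32,
//   IsAligned(48, 16)  == true, and Log2(16) == 4.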
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.

template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = nullptr;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize the internal buffer.
    if (new_size > capacity())
      Realloc(RoundUpToPowerOfTwo(new_size));
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};
template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(1); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
class InternalScopedString : public InternalMmapVector<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalMmapVector<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void append(const char *format, ...);

 private:
  uptr length_;
};
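
// Illustrative usage (a sketch, not from this file):
//   InternalScopedString str(kMaxPathLength);
//   str.append("%s: %d\n", "value", 42);
//   RawWrite(str.data());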
template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};
// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap the largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
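
// Illustrative usage (a sketch, not from this file):
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(3); addrs.push_back(1); addrs.push_back(2);
//   Sort(addrs.data(), addrs.size());   // addrs is now {1, 2, 3}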
// Works like std::lower_bound: finds the first element that is not less
// than the given value.
template <class Container, class Value, class Compare>
uptr InternalLowerBound(const Container &v, uptr first, uptr last,
                        const Value &val, Compare comp) {
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
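
// Illustrative usage (a sketch, not from this file): find the insertion point
// for 20 in a sorted InternalMmapVector<uptr> `v` == {10, 20, 20, 30}.
//   uptr idx = InternalLowerBound(v, 0, v.size(), (uptr)20, CompareLess<uptr>());
//   // idx == 1, the first element that is not less than 20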
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector, as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between could produce
// inconsistent results.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);
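
// Illustrative usage (a sketch, not from this file):
//   char *buff;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/cmdline", &buff, &buff_size, &read_len)) {
//     ...  // use buff[0 .. read_len)
//     UnmapOrDie(buff, buff_size);
//   }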
// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 16;
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
// List of LoadedModules. The OS-dependent implementation is responsible for
// filling in this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};
void WriteToSyslog(const char *buffer);

#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}
void *internal_start_thread(void (*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if the signal is a stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform-specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
};
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}
// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif
struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release memory
// to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking, like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);
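
// Illustrative usage (a sketch, not from this file):
//   u64 seed;
//   if (!GetRandom(&seed, sizeof(seed), /*blocking=*/false))
//     seed = MonotonicNanoTime();  // fall back to a weak seed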

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
INLINE u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}
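
// Illustrative placement-new usage with the allocator-aware operator above
// (a sketch, not from this file; `MyInternalObject` is a placeholder type):
//   static LowLevelAllocator internal_alloc;  // linker-initialized
//   ...
//   auto *obj = new (internal_alloc) MyInternalObject();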

#endif  // SANITIZER_COMMON_H