1 //=-- lsan_common.h -------------------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file is a part of LeakSanitizer.
10 // Private LSan header.
12 //===----------------------------------------------------------------------===//
17 #include "sanitizer_common/sanitizer_allocator.h"
18 #include "sanitizer_common/sanitizer_common.h"
19 #include "sanitizer_common/sanitizer_internal_defs.h"
20 #include "sanitizer_common/sanitizer_platform.h"
21 #include "sanitizer_common/sanitizer_stackdepot.h"
22 #include "sanitizer_common/sanitizer_stoptheworld.h"
23 #include "sanitizer_common/sanitizer_symbolizer.h"
24 #include "sanitizer_common/sanitizer_thread_registry.h"
26 // LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) on Linux.
27 // Also, LSan doesn't like 32 bit architectures
28 // because of "small" (4 bytes) pointer size that leads to high false negative
29 // ratio on large leaks. But we still want to have it for some 32 bit arches
30 // (e.g. x86), see https://github.com/google/sanitizers/issues/403.
31 // To enable LeakSanitizer on a new architecture, one needs to implement the
32 // internal_clone function as well as (probably) adjust the TLS machinery for
33 // the new architecture inside the sanitizer library.
34 // Exclude leak-detection on arm32 for Android because `__aeabi_read_tp`
35 // is missing. This caused a link error.
// Exclude leak-detection on arm32 for Android because `__aeabi_read_tp`
// is missing. This caused a link error.
#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
#  define CAN_SANITIZE_LEAKS 0
#elif (SANITIZER_LINUX || SANITIZER_APPLE) && (SANITIZER_WORDSIZE == 64) && \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) ||    \
     defined(__powerpc64__) || defined(__s390x__))
#  define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_APPLE)
#  define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_LOONGARCH64 && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_RISCV64 && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#  define CAN_SANITIZE_LEAKS 1
#else
// Default: leak checking is not supported on this platform/arch combination.
#  define CAN_SANITIZE_LEAKS 0
#endif
56 namespace __sanitizer
{
59 class ThreadContextBase
;
// This section defines function and class prototypes which must be implemented
// by the parent tool linking in LSan. There are implementations provided by the
// LSan library which will be linked in when LSan is used as a standalone tool.

// Chunk tag recorded per allocation. NOTE(review): the paste dropped the enum
// wrappers and some enumerators; kReachable/kIgnored reconstructed from the
// upstream header — verify against the project copy.
enum ChunkTag {
  kDirectlyLeaked = 0,  // default
  kIndirectlyLeaked = 1,
  kReachable = 2,
  kIgnored = 3
};

// Result of __lsan_ignore_object() (see IgnoreObject below).
enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};
87 //// --------------------------------------------------------------------------
88 //// Poisoning prototypes.
89 //// --------------------------------------------------------------------------
91 // Returns true if [addr, addr + sizeof(void *)) is poisoned.
92 bool WordIsPoisoned(uptr addr
);
94 //// --------------------------------------------------------------------------
95 //// Thread prototypes.
96 //// --------------------------------------------------------------------------
98 // Wrappers for ThreadRegistry access.
99 void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS
;
100 void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS
;
101 // If called from the main thread, updates the main thread's TID in the thread
102 // registry. We need this to handle processes that fork() without a subsequent
103 // exec(), which invalidates the recorded TID. To update it, we must call
104 // gettid() from the main thread. Our solution is to call this function before
105 // leak checking and also before every call to pthread_create() (to handle cases
106 // where leak checking is initiated from a non-main thread).
107 void EnsureMainThreadIDIsCorrect();
109 bool GetThreadRangesLocked(tid_t os_id
, uptr
*stack_begin
, uptr
*stack_end
,
110 uptr
*tls_begin
, uptr
*tls_end
, uptr
*cache_begin
,
111 uptr
*cache_end
, DTLS
**dtls
);
112 void GetAllThreadAllocatorCachesLocked(InternalMmapVector
<uptr
> *caches
);
113 void GetThreadExtraStackRangesLocked(InternalMmapVector
<Range
> *ranges
);
114 void GetThreadExtraStackRangesLocked(tid_t os_id
,
115 InternalMmapVector
<Range
> *ranges
);
116 void GetAdditionalThreadContextPtrsLocked(InternalMmapVector
<uptr
> *ptrs
);
117 void GetRunningThreadsLocked(InternalMmapVector
<tid_t
> *threads
);
119 //// --------------------------------------------------------------------------
120 //// Allocator prototypes.
121 //// --------------------------------------------------------------------------
123 // Wrappers for allocator's ForceLock()/ForceUnlock().
124 void LockAllocator();
125 void UnlockAllocator();
127 // Returns the address range occupied by the global allocator object.
128 void GetAllocatorGlobalRange(uptr
*begin
, uptr
*end
);
129 // If p points into a chunk that has been allocated to the user, returns its
130 // user-visible address. Otherwise, returns 0.
131 uptr
PointsIntoChunk(void *p
);
132 // Returns address of user-visible chunk contained in this allocator chunk.
133 uptr
GetUserBegin(uptr chunk
);
134 // Returns user-visible address for chunk. If memory tagging is used this
135 // function will return the tagged address.
136 uptr
GetUserAddr(uptr chunk
);
138 // Wrapper for chunk metadata operations.
141 // Constructor accepts address of user-visible chunk.
142 explicit LsanMetadata(uptr chunk
);
143 bool allocated() const;
144 ChunkTag
tag() const;
145 void set_tag(ChunkTag value
);
146 uptr
requested_size() const;
147 u32
stack_trace_id() const;
153 // Iterate over all existing chunks. Allocator must be locked.
154 void ForEachChunk(ForEachChunkCallback callback
, void *arg
);
156 // Helper for __lsan_ignore_object().
157 IgnoreObjectResult
IgnoreObject(const void *p
);
159 // The rest of the LSan interface which is implemented by library.
161 struct ScopedStopTheWorldLock
{
162 ScopedStopTheWorldLock() {
163 LockThreadRegistry();
167 ~ScopedStopTheWorldLock() {
169 UnlockThreadRegistry();
172 ScopedStopTheWorldLock
&operator=(const ScopedStopTheWorldLock
&) = delete;
173 ScopedStopTheWorldLock(const ScopedStopTheWorldLock
&) = delete;
177 #define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
178 #include "lsan_flags.inc"
182 uptr
pointer_alignment() const {
183 return use_unaligned
? 1 : sizeof(uptr
);
187 extern Flags lsan_flags
;
188 inline Flags
*flags() { return &lsan_flags
; }
189 void RegisterLsanFlags(FlagParser
*parser
, Flags
*f
);
198 using LeakedChunks
= InternalMmapVector
<LeakedChunk
>;
205 bool is_directly_leaked
;
209 struct LeakedObject
{
215 // Aggregates leaks by stack trace prefix.
219 void AddLeakedChunks(const LeakedChunks
&chunks
);
220 void ReportTopLeaks(uptr max_leaks
);
222 uptr
ApplySuppressions();
223 uptr
UnsuppressedLeakCount();
224 uptr
IndirectUnsuppressedLeakCount();
227 void PrintReportForLeak(uptr index
);
228 void PrintLeakedObjectsForLeak(uptr index
);
231 InternalMmapVector
<Leak
> leaks_
;
232 InternalMmapVector
<LeakedObject
> leaked_objects_
;
235 typedef InternalMmapVector
<uptr
> Frontier
;
237 // Platform-specific functions.
238 void InitializePlatformSpecificModules();
239 void ProcessGlobalRegions(Frontier
*frontier
);
240 void ProcessPlatformSpecificAllocations(Frontier
*frontier
);
247 // LockStuffAndStopTheWorld can start to use Scan* calls to collect into
248 // this Frontier vector before the StopTheWorldCallback actually runs.
249 // This is used when the OS has a unified callback API for suspending
250 // threads and enumerating roots.
251 struct CheckForLeaksParam
{
256 bool success
= false;
259 InternalMmapVectorNoCtor
<RootRegion
> const *GetRootRegions();
260 void ScanRootRegion(Frontier
*frontier
, RootRegion
const ®ion
,
261 uptr region_begin
, uptr region_end
, bool is_readable
);
262 // Run stoptheworld while holding any platform-specific locks, as well as the
263 // allocator and thread registry locks.
264 void LockStuffAndStopTheWorld(StopTheWorldCallback callback
,
265 CheckForLeaksParam
* argument
);
267 void ScanRangeForPointers(uptr begin
, uptr end
,
269 const char *region_type
, ChunkTag tag
);
270 void ScanGlobalRange(uptr begin
, uptr end
, Frontier
*frontier
);
271 void ScanExtraStackRanges(const InternalMmapVector
<Range
> &ranges
,
// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
285 // Can be used to ignore memory allocated by an intercepted
287 struct ScopedInterceptorDisabler
{
288 ScopedInterceptorDisabler() { DisableInThisThread(); }
289 ~ScopedInterceptorDisabler() { EnableInThisThread(); }
292 // According to Itanium C++ ABI array cookie is a one word containing
293 // size of allocated array.
294 static inline bool IsItaniumABIArrayCookie(uptr chunk_beg
, uptr chunk_size
,
296 return chunk_size
== sizeof(uptr
) && chunk_beg
+ chunk_size
== addr
&&
297 *reinterpret_cast<uptr
*>(chunk_beg
) == 0;
300 // According to ARM C++ ABI array cookie consists of two words:
301 // struct array_cookie {
302 // std::size_t element_size; // element_size != 0
303 // std::size_t element_count;
305 static inline bool IsARMABIArrayCookie(uptr chunk_beg
, uptr chunk_size
,
307 return chunk_size
== 2 * sizeof(uptr
) && chunk_beg
+ chunk_size
== addr
&&
308 *reinterpret_cast<uptr
*>(chunk_beg
+ sizeof(uptr
)) == 0;
311 // Special case for "new T[0]" where T is a type with DTOR.
312 // new T[0] will allocate a cookie (one or two words) for the array size (0)
313 // and store a pointer to the end of allocated chunk. The actual cookie layout
314 // varies between platforms according to their C++ ABI implementation.
315 inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg
, uptr chunk_size
,
318 return IsARMABIArrayCookie(chunk_beg
, chunk_size
, addr
);
320 return IsItaniumABIArrayCookie(chunk_beg
, chunk_size
, addr
);
324 // Return the linker module, if valid for the platform.
325 LoadedModule
*GetLinker();
327 // Return true if LSan has finished leak checking and reported leaks.
328 bool HasReportedLeaks();
330 // Run platform-specific leak handlers.
333 } // namespace __lsan
336 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
337 const char *__lsan_default_options();
339 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
340 int __lsan_is_turned_off();
342 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
343 const char *__lsan_default_suppressions();
345 SANITIZER_INTERFACE_ATTRIBUTE
346 void __lsan_register_root_region(const void *p
, __lsan::uptr size
);
348 SANITIZER_INTERFACE_ATTRIBUTE
349 void __lsan_unregister_root_region(const void *p
, __lsan::uptr size
);
353 #endif // LSAN_COMMON_H