//=-- lsan_common.h -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//
16 #include "sanitizer_common/sanitizer_allocator.h"
17 #include "sanitizer_common/sanitizer_common.h"
18 #include "sanitizer_common/sanitizer_internal_defs.h"
19 #include "sanitizer_common/sanitizer_platform.h"
20 #include "sanitizer_common/sanitizer_stoptheworld.h"
21 #include "sanitizer_common/sanitizer_symbolizer.h"
// LeakSanitizer relies on some Glibc internals (e.g. the TLS machinery) and is
// therefore supported on Linux only. Also, LSan doesn't like 32 bit
// architectures because the "small" (4 byte) pointer size leads to a high
// false negative ratio on large leaks. But we still want to have it for some
// 32 bit arches (e.g. x86), see https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust the TLS machinery for
// the new architecture inside the sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
    (SANITIZER_WORDSIZE == 64) && \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
     defined(__powerpc64__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && \
    (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
#define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && \
    SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif
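// Code that depends on leak checking is expected to be guarded on this macro,
// e.g. (sketch):
//   #if CAN_SANITIZE_LEAKS
//     // leak-checking logic
//   #endif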
namespace __sanitizer {
class FlagParser;
class ThreadRegistry;
struct DTLS;
}

namespace __lsan {

// Chunk tags.
enum ChunkTag {
  kDirectlyLeaked = 0,  // default
  kIndirectlyLeaked = 1,
  kReachable = 2,
  kIgnored = 3
};

const u32 kInvalidTid = (u32) -1;
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG

  void SetDefaults();
  uptr pointer_alignment() const {
    return use_unaligned ? 1 : sizeof(uptr);
  }
};
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);
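// Usage sketch: LSan code reads runtime flags through the accessor above, e.g.
// (flag names are declared in lsan_flags.inc):
//   if (flags()->use_unaligned) { /* scan at byte granularity */ }
//   uptr alignment = flags()->pointer_alignment();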
struct Leak {
  u32 id;
  uptr hit_count;
  uptr total_size;
  u32 stack_trace_id;
  bool is_directly_leaked;
  bool is_suppressed;
};

struct LeakedObject {
  u32 leak_id;
  uptr addr;
  uptr size;
};
// Aggregates leaks by stack trace prefix.
class LeakReport {
 public:
  LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
  void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
                      ChunkTag tag);
  void ReportTopLeaks(uptr max_leaks);
  void PrintSummary();
  void ApplySuppressions();
  uptr UnsuppressedLeakCount();

 private:
  void PrintReportForLeak(uptr index);
  void PrintLeakedObjectsForLeak(uptr index);

  u32 next_id_;
  InternalMmapVector<Leak> leaks_;
  InternalMmapVector<LeakedObject> leaked_objects_;
};
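// Usage sketch: the leak checker feeds every leaked chunk into a LeakReport
// and then prints it, roughly (m is the chunk's LsanMetadata, declared below):
//   LeakReport report;
//   report.AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(),
//                         m.tag());
//   report.ApplySuppressions();
//   if (report.UnsuppressedLeakCount())
//     report.ReportTopLeaks(flags()->max_leaks);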
typedef InternalMmapVector<uptr> Frontier;
// Platform-specific functions.
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
struct RootRegion {
  uptr begin;
  uptr size;
};

InternalMmapVector<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                    uptr region_begin, uptr region_end, bool is_readable);
// Run stoptheworld while holding any platform-specific locks.
void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
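// Usage sketch (the callback type comes from sanitizer_stoptheworld.h, which
// is authoritative for the exact signature):
//   static void CheckCallback(const SuspendedThreadsList &suspended_threads,
//                             void *arg) {
//     // every other thread is suspended here, so stacks/TLS can be scanned
//   }
//   DoStopTheWorld(CheckCallback, &param);  // param: tool-defined argument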
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
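// Usage sketch: reachability analysis is a flood fill over the Frontier.
// Roughly, the checker seeds the frontier with root ranges and then repeats:
//   while (frontier.size()) {
//     uptr chunk = frontier.back();
//     frontier.pop_back();
//     LsanMetadata m(chunk);  // declared further below
//     ScanRangeForPointers(chunk, chunk + m.requested_size(), &frontier,
//                          "HEAP", kReachable);
//   }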
enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};
// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
bool DisabledInThisThread();
// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted
// function.
struct ScopedInterceptorDisabler {
  ScopedInterceptorDisabler() { DisableInThisThread(); }
  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
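// Usage sketch: in an interceptor whose internal allocations should not be
// reported as leaks:
//   {
//     ScopedInterceptorDisabler disabler;
//     // heap allocations made in this scope are ignored by the leak checker
//   }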
// According to the Itanium C++ ABI, an array cookie is a single word
// containing the size of the allocated array.
static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                           uptr addr) {
  return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg) == 0;
}
// According to the ARM C++ ABI, an array cookie consists of two words:
// struct array_cookie {
//   std::size_t element_size; // element_size != 0
//   std::size_t element_count;
// };
static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                       uptr addr) {
  return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
}
// Special case for "new T[0]" where T is a type with a DTOR.
// new T[0] will allocate a cookie (one or two words) for the array size (0)
// and store a pointer to the end of the allocated chunk. The actual cookie
// layout varies between platforms according to their C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
                                        uptr addr) {
#if defined(__arm__)
  return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
#else
  return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
#endif
}
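// Illustration (Itanium layout, assuming "struct S { ~S(); };"):
//   S *p = new S[0];
//   // chunk layout: [ cookie == 0 ][ p points here ]
//   // i.e. p == chunk_beg + sizeof(uptr) == chunk_beg + chunk_size, which is
//   // exactly the pattern the helpers above recognize, so the chunk is still
//   // treated as reachable.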
// The following must be implemented in the parent tool.

void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle
// cases where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
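// Usage sketch (in the parent tool's pthread_create interceptor):
//   EnsureMainThreadIDIsCorrect();
//   // ... then forward to the real pthread_create()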
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);
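// Usage sketch: __lsan_ignore_object() takes the necessary locks, calls
// IgnoreObjectLocked() and reacts to the result, e.g. (message text is
// illustrative):
//   IgnoreObjectResult res = IgnoreObjectLocked(p);
//   if (res == kIgnoreObjectInvalid)
//     Report("no heap object found at %p\n", p);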
// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();

// Return true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();
// Run platform-specific leak handlers.
void HandleLeaks();
// Wrapper for chunk metadata operations.
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  bool allocated() const;
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  uptr requested_size() const;
  u32 stack_trace_id() const;

 private:
  void *metadata_;
};
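// Usage sketch: the checker inspects chunks through this wrapper, e.g. inside
// a ForEachChunk() callback:
//   static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
//     chunk = GetUserBegin(chunk);
//     LsanMetadata m(chunk);
//     if (m.allocated() && m.tag() != kReachable)
//       m.set_tag(kIndirectlyLeaked);
//   }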
}  // namespace __lsan

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();
}  // extern "C"
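// These are weak hooks that a user program may define to customize LSan, for
// example (flag names come from lsan_flags.inc):
//   extern "C" const char *__lsan_default_options() {
//     return "report_objects=1:max_leaks=10";
//   }
//   extern "C" int __lsan_is_turned_off() { return 0; }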
#endif  // LSAN_COMMON_H