//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//
#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#if CAN_SANITIZE_LEAKS

# if SANITIZER_APPLE
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
# if SANITIZER_IOS && !SANITIZER_IOSSIM
# define OBJC_DATA_MASK 0x0000007ffffffff8UL
# else
# define OBJC_DATA_MASK 0x00007ffffffffff8UL
# endif
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139
# define OBJC_FAST_IS_RW 0x8000000000000000UL
# endif

namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
static Mutex global_mutex;

Flags lsan_flags;
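// Called when the per-thread disable counter underflows, i.e. __lsan_enable()
// was called more times than __lsan_disable().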
void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}
void Flags::SetDefaults() {
# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
# include "lsan_flags.inc"
# undef LSAN_FLAG
}
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
# define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
# include "lsan_flags.inc"
# undef LSAN_FLAG
}
# define LOG_POINTERS(...)     \
  do {                         \
    if (flags()->log_pointers) \
      Report(__VA_ARGS__);     \
  } while (0)

# define LOG_THREADS(...)     \
  do {                        \
    if (flags()->log_threads) \
      Report(__VA_ARGS__);    \
  } while (0)
class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};
ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
# endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
# if SANITIZER_APPLE
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
# endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";
void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}
void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);
    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}
Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}
static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}
# if SANITIZER_APPLE
// Objective-C class data pointers are stored with flags in the low bits, so
// they need to be transformed back into something that looks like a pointer.
static inline void *MaybeTransformPointer(void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW)
    ptr &= OBJC_DATA_MASK;
  return reinterpret_cast<void *>(ptr);
}
# endif
// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}
bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}
bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}
static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVectorNoCtor<RootRegion> root_regions;

InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
  return &root_regions;
}
void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}
class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};
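// Heuristically decides whether a value read from memory can be a pointer
// into the user heap: rejects small integers and addresses outside the
// architecture's user-space VMA (ignoring hardware-ignored tag bits).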
static inline bool MaybeUserPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
# if defined(__x86_64__)
  // TODO: add logic similar to ARM when Intel LAM is available.
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
# elif defined(__mips64)
  return ((p >> 40) == 0);
# elif defined(__aarch64__)
  // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
  // address translation and can be used to store a tag.
  constexpr uptr kPointerMask = 255ULL << 48;
  // Accept up to 48 bit VMA.
  return ((p & kPointerMask) == 0);
# elif defined(__loongarch_lp64)
  // Allow 47-bit user-space VMA at current.
  return ((p >> 47) == 0);
# else
  return true;
# endif
}
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
# if SANITIZER_APPLE
    p = MaybeTransformPointer(p);
# endif
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
// Scans a global range for pointers.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}
void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier) {
  for (uptr i = 0; i < ranges.size(); i++) {
    ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
                         kReachable);
  }
}
# if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
                           uptr) {}

# else

# if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
# endif
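// Marks heap chunks pointed to from thread contexts (as reported by
// GetAdditionalThreadContextPtrsLocked) as reachable and adds them to the
// frontier.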
static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetAdditionalThreadContextPtrsLocked(&ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier, tid_t caller_tid,
                           uptr caller_sp) {
  InternalMmapVector<uptr> registers;
  InternalMmapVector<Range> extra_ranges;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }
    if (suspended_threads.GetThreadID(i) == caller_tid) {
      sp = caller_sp;
    }

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      extra_ranges.clear();
      GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
      ScanExtraStackRanges(extra_ranges, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
# if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
# else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
# endif
    }
  }

  // Add pointers reachable from ThreadContexts
  ProcessThreadRegistry(frontier);
}

# endif  // SANITIZER_FUCHSIA
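// Scans the intersection of a user-registered root region with one mapped
// memory segment, treating pointers found there as roots if the mapping is
// readable.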
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end)
    return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               (void *)root_region.begin,
               (void *)(root_region.begin + root_region.size),
               (void *)region_begin, (void *)region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}
static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}
// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions)
    return;
  for (uptr i = 0; i < root_regions.size(); i++)
    ProcessRootRegion(frontier, root_regions[i]);
}
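// Breadth-first flood fill over the heap graph: repeatedly pops a chunk off
// the frontier and scans it for pointers into other chunks, tagging newly
// discovered chunks with |tag|.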
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}
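// ForEachChunk callback. Marks chunks whose allocation stack was previously
// suppressed as kIgnored so they are excluded from subsequent scans.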
static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}
// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier, tid_t caller_tid,
                              uptr caller_sp) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}
// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}
void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}
# if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

# else  // !SANITIZER_FUCHSIA
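// Warns about threads known to the thread registry that were not suspended:
// pointers held only in their stacks or registers may be missed, which can
// produce false leak reports.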
static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  InternalMmapVector<tid_t> unsuspended;
  GetRunningThreadsLocked(&unsuspended);

  for (auto os_id : unsuspended) {
    uptr i = InternalLowerBound(threads, os_id);
    if (i >= threads.size() || threads[i] != os_id)
      Report(
          "Running thread %zu was not suspended. False leaks are possible.\n",
          os_id);
  }
}

# endif  // !SANITIZER_FUCHSIA
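// Runs with the world stopped: classifies all chunks, collects those still
// tagged as leaked into the report, and resets tags for subsequent checks.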
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
                    param->caller_sp);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}
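// Prints the leak report and any matched suppressions. Returns true if
// unsuppressed leaks were reported.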
static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    report.PrintSummary();
    return true;
  }
  return false;
}
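// Top-level leak check. Reruns the scan while new suppressed stacks keep
// being discovered, so that indirect leaks reachable only from suppressed
// allocations are suppressed as well.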
static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
    VReport(1, "LeakSanitizer is disabled");
    return false;
  }
  VReport(1, "LeakSanitizer: checking for leaks");
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    // Capture calling thread's stack pointer early, to avoid false negatives.
    // Old frame with dead pointers might be overlapped by new frame inside
    // CheckForLeaks which does not use bytes with pointers before the
    // threads are suspended and stack pointers captured.
    param.caller_tid = GetTid();
    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No new suppressions stacks, so rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}
static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }
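// Runs the leak check at most once per process and hands off to HandleLeaks()
// if leaks were reported.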
void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}
static int DoRecoverableLeakCheck() {
  Lock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;
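// Merges leaked chunks into the report, deduplicating by allocation stack and
// leak kind (direct vs. indirect); individual leaked objects are recorded only
// when report_objects is set.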
void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++,         /* hit_count */ 1,
                   leaked_size,        stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}
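// Orders leaks for reporting: direct leaks before indirect ones, and larger
// total size first within each group.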
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}
void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}
void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}
void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}
void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}
uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}
uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}
uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}
}  // namespace __lsan

#else  // CAN_SANITIZE_LEAKS

namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan

#endif  // CAN_SANITIZE_LEAKS
using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObject(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions.push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  bool removed = false;
  for (uptr i = 0; i < root_regions.size(); i++) {
    RootRegion region = root_regions[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions.size() - 1;
      root_regions[i] = root_regions[last_index];
      root_regions.pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}
SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
  return "";
}
#endif
}  // extern "C"