//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
static Mutex global_mutex;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
    RegisterFlag(parser, #Name, Description, &f->Name);
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

#  define LOG_POINTERS(...)      \
    do {                         \
      if (flags()->log_pointers) \
        Report(__VA_ARGS__);     \
    } while (0)

#  define LOG_THREADS(...)      \
    do {                        \
      if (flags()->log_threads) \
        Report(__VA_ARGS__);    \
    } while (0)
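
// Matches allocation stacks of leaked chunks against the "leak:" suppression
// rules and records the suppressed stack ids, so that subsequent leak-check
// passes can reuse them without re-symbolizing.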
class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};

ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
#  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#  if SANITIZER_MAC
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#  endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}

void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);
    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}

Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}

bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}

static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVectorNoCtor<RootRegion> root_regions;

InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
  return &root_regions;
}

void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};
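
// Fast, conservative filter used by ScanRangeForPointers: rejects values that
// cannot possibly be heap pointers (below a minimal mmap address, or with high
// bits set beyond the architecture's user-space virtual address range).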
static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
#  if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#  elif defined(__mips64)
  return ((p >> 40) == 0);
#  elif defined(__aarch64__)
  unsigned runtimeVMA = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#  else
  return true;
#  endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

#  if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}

#  else

#    if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#    endif
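
// Adds heap chunks that are pointed to from ThreadContext structures (as
// collected by GetAdditionalThreadContextPtrs) to the root set.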
static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      GetAdditionalThreadContextPtrs, &ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalMmapVector<uptr> registers;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
#    if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
#    else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
#    endif
    }
  }

  // Add pointers reachable from ThreadContexts
  ProcessThreadRegistry(frontier);
}

#  endif  // SANITIZER_FUCHSIA

void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end)
    return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               (void *)root_region.begin,
               (void *)(root_region.begin + root_region.size),
               (void *)region_begin, (void *)region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions)
    return;
  for (uptr i = 0; i < root_regions.size(); i++)
    ProcessRootRegion(frontier, root_regions[i]);
}
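
// Drains |frontier|: repeatedly pops a chunk and scans its payload for
// pointers into other chunks, tagging newly discovered chunks with |tag| and
// pushing them back onto the frontier until a fixed point is reached.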
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}
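
// ForEachChunk callback. Marks chunks whose allocation stack id appears in the
// sorted vector of suppressed stack ids (passed via |arg|) as kIgnored.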
static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakedChunks vector.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}
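
// ThreadRegistry callback: warns if a thread that the registry considers
// running is missing from the sorted list of suspended thread ids.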
static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report(
          "Running thread %llu was not suspended. False leaks are possible.\n",
          tctx->os_id);
  }
}

#  if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

#  else  // !SANITIZER_FUCHSIA

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}

#  endif  // !SANITIZER_FUCHSIA

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}
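
// Prints the leak report, any matched suppressions and the summary line;
// returns true if at least one unsuppressed leak was reported.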
static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    report.PrintSummary();
    return true;
  }
  return false;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No new suppressed stacks, so a rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }
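
// Performs the one-shot, non-recoverable leak check; only the first call does
// any work, and HandleLeaks() is invoked if leaks were reported.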
void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}
768 Lock
l(&global_mutex
);
769 bool have_leaks
= CheckForLeaks();
770 return have_leaks
? 1 : 0;
773 void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++,         /* hit_count */ 1,
                   leaked_size,        stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}
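
// Orders leaks for reporting: direct leaks before indirect ones, and larger
// total size first within each group.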
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}
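
// Runs every leak's allocation stack through the suppression context and
// returns the number of leaks that became suppressed in this pass.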
uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}
LeakReport::IndirectUnsuppressedLeakCount() {
922 for (uptr i
= 0; i
< leaks_
.size(); i
++)
923 if (!leaks_
[i
].is_suppressed
&& !leaks_
[i
].is_directly_leaked
)

}  // namespace __lsan

#else  // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
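
// Usage sketch for the root-region interface below (client code, not part of
// this file), assuming the declarations from <sanitizer/lsan_interface.h>:
//
//   static char arena[1 << 20];  // memory managed outside malloc/free
//   __lsan_register_root_region(arena, sizeof(arena));
//   ...                          // pointers stored in |arena| now keep their
//                                // targets reachable during leak checks
//   __lsan_unregister_root_region(arena, sizeof(arena));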
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions.push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  bool removed = false;
  for (uptr i = 0; i < root_regions.size(); i++) {
    RootRegion region = root_regions[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions.size() - 1;
      root_regions[i] = root_regions[last_index];
      root_regions.pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
__lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
__lsan_default_suppressions() {