//=-- lsan_common.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
  if (!disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  disable_counter--;
}

Flags lsan_flags;

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
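
// The flags declared in lsan_flags.inc are parsed from the LSAN_OPTIONS
// environment variable. An illustrative invocation (a sketch; log_pointers
// and log_threads are the debugging flags used by the LOG_* macros below):
//
//   LSAN_OPTIONS=log_pointers=1:log_threads=1 ./a.out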

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0);

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0);

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}
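
// Suppression files parsed above contain one "leak:<pattern>" rule per line;
// patterns are matched against module, function, and file names (see
// GetSuppressionForAddr below). An illustrative file, passed via
// LSAN_OPTIONS=suppressions=supp.txt:
//
//   leak:libfoo.so
//   leak:MyKnownLeakyFunction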

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
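
// For example, on x86_64 a canonical user-space address such as
// 0x00007f0012345678 has all bits above bit 46 clear, so (p >> 47) == 0 and
// the check passes; a kernel address such as 0xffff800000000000 yields a
// nonzero shifted value and is rejected.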

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
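  // Worked example of the round-up above (illustrative): with alignment == 8
  // and begin == 0x1003, pp becomes 0x1003 + 8 - 3 == 0x1008; an
  // already-aligned begin is left unchanged by the preceding guard.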
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
      if (dtls) {
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      }
    }
  }
}

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ nullptr, /*filename*/ nullptr,
                        /*filename_size*/ 0, &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
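
// A minimal sketch of the traversal above: the frontier is a worklist, so
// reachability spreads transitively. If chunk A (already tagged kReachable)
// holds the only pointer to chunk B, scanning A pushes B onto the frontier,
// and a later iteration of the loop scans B in turn.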

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  CHECK_EQ(0, frontier.size());
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Report(
        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  bool have_leaks = CheckForLeaks();
  if (!have_leaks) {
    return;
  }
  if (common_flags()->exitcode) {
    Die();
  }
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}
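
// Sort order implied above: direct leaks precede indirect ones, and within
// each group larger total sizes come first. For example, a 4096-byte direct
// leak sorts ahead of an 8192-byte indirect leak.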

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                           leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan

#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
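
// Illustrative use from client code (a sketch; the declaration lives in
// <sanitizer/lsan_interface.h>):
//
//   #include <sanitizer/lsan_interface.h>
//   void *p = malloc(32);     // reachable only through an untracked region
//   __lsan_ignore_object(p);  // p will not be reported as leaked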

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}
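
// Illustrative pairing with __lsan_unregister_root_region below (a sketch;
// custom_arena and arena_size are hypothetical client-side names):
//
//   __lsan_register_root_region(custom_arena, arena_size);
//   ...  // pointers stored in the arena now keep their targets reachable
//   __lsan_unregister_root_region(custom_arena, arena_size);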

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}
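
// Illustrative use (a sketch): calls must be balanced per thread, or
// EnableInThisThread() above dies with "Unmatched call to __lsan_enable()".
// Allocations made while disabled are ignored by the leak check:
//
//   __lsan_disable();
//   void *intentionally_leaked = malloc(64);  // not reported by LSan
//   __lsan_enable();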

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}
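
// Illustrative use (a sketch): unlike __lsan_do_leak_check(), this entry
// point reports leaks without terminating the process, so it can be called
// repeatedly at checkpoints:
//
//   if (__lsan_do_recoverable_leak_check())
//     fprintf(stderr, "leaks detected at this checkpoint\n");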

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
} // extern "C"