//=-- lsan_common.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0);

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0);
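
// Both logs are off by default and can be enabled at run time, e.g. with
// LSAN_OPTIONS=log_pointers=1:log_threads=1 (the flag names match the
// LSAN_FLAG definitions pulled in from lsan_flags.inc above).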

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // definition.
  "leak:*pthread_exit*\n"
#endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
  // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
  "leak:*_os_trace*\n"
#endif
  // TLS leak in some glibc versions, described in
  // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
  "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
  suppression_ctx->Parse(kStdSuppressions);
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVector<RootRegion> *root_regions;

InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

const char *MaybeCallLsanDefaultOptions() {
  return (&__lsan_default_options) ? __lsan_default_options() : "";
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
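
// For example, on x86_64 a canonical user-space address fits in the low 47
// bits, so 0x00007fffffff0000 passes the check while a kernel-half address
// such as 0xffff800000000000 is rejected. On aarch64 the VMA size varies
// between configurations, so it is derived at run time from the current frame
// address instead of being hard-coded.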

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
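
// Both calling modes appear later in this file; the shapes are roughly:
//   ScanRangeForPointers(stack_begin, stack_end, &frontier, "STACK",
//                        kReachable);                  // flood-fill mode
//   ScanRangeForPointers(chunk, chunk + size, nullptr, "HEAP",
//                        kIndirectlyLeaked);           // no-frontier mode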

// Scans a global range for pointers.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions.
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
      if (dtls && !DTLSInDestruction(dtls)) {
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
      }
    }
  }
}

void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end) return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               root_region.begin, root_region.begin + root_region.size,
               region_begin, region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    ProcessRootRegion(frontier, (*root_regions)[i]);
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct InvalidPCParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. If the caller pc is invalid or is within the linker,
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
static void MarkInvalidPCCb(uptr chunk, void *arg) {
  CHECK(arg);
  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
    // it as reachable, as we can't properly report its allocation stack anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           GetLinker()->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// On Linux, handles dynamically allocated TLS blocks by treating all chunks
// allocated from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
void ProcessPC(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  InvalidPCParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
  ForEachChunk(MarkInvalidPCCb, &arg);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  CHECK_EQ(0, frontier.size());
  ProcessPC(&frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}
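
// The resulting table looks roughly like this (values are illustrative):
//   -----------------------------------------------------
//   Suppressions used:
//     count      bytes template
//         2       1024 MyLeakyFunction
//   -----------------------------------------------------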

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Report(
        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks) HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                           leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan

#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
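
// Usage sketch (hypothetical client code): annotate an allocation that is
// intentionally never freed so it is excluded from leak reports:
//   void *p = malloc(16);     // deliberately kept alive for process lifetime
//   __lsan_ignore_object(p);  // tags the chunk kIgnored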

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}
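
// Usage sketch (hypothetical client code): make a custom-mapped arena part of
// the root set, so pointers stored inside it keep heap objects reachable:
//   static char arena[1 << 20];
//   __lsan_register_root_region(arena, sizeof(arena));
//   // ... use the arena ...
//   __lsan_unregister_root_region(arena, sizeof(arena));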

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}
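
// Usage sketch (hypothetical client code): allocations made by the current
// thread between these calls are treated as ignored and never reported:
//   __lsan_disable();
//   leaky_third_party_init();  // known to leak by design
//   __lsan_enable();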

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}
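
// Unlike __lsan_do_leak_check(), which runs at most once per process, the
// recoverable variant may be called repeatedly; it returns nonzero when leaks
// were found, e.g.:
//   if (__lsan_do_recoverable_leak_check())
//     fprintf(stderr, "leaks detected at this checkpoint\n");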

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char * __lsan_default_options() {
  return "";
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
  return "";
}
#endif
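
// On platforms with weak-hook support, a client binary may define these hooks
// itself instead, e.g. (hypothetical user code, linked into the instrumented
// program):
//   extern "C" const char *__lsan_default_options() {
//     return "print_suppressions=0";
//   }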
} // extern "C"