libsanitizer/lsan/lsan_common.cc
//=-- lsan_common.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//
#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;
void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0);

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0);
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}
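
// Illustrative note (not part of the original source): a suppressions file
// passed via the "suppressions" flag is plain text with one "leak:<pattern>"
// rule per line, e.g.
//   leak:libfoo.so
//   leak:NoisyInitFunction
// Each pattern is matched against the module, function and source file names
// of frames in the allocation stack (see GetSuppressionForAddr below).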
static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}
struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;
void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}
void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}
class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};
static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
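
// Worked example for the x86-64 check above (illustrative, not in the original
// source): a typical user-space pointer such as 0x00007f0000001000 has bits
// 47-63 clear, so (p >> 47) == 0 and it is accepted; a kernel-space value such
// as 0xffff800000000000 yields a non-zero result and is rejected early.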
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
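
// Illustrative arithmetic for the alignment step above (not in the original
// source): with begin = 0x1003 and pointer_alignment() = 8, pp % alignment is
// 3, so pp is rounded up to 0x1003 + 8 - 3 = 0x1008 before the scan starts.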
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }
    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}
static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ nullptr, /*filename*/ nullptr,
                        /*filename_size*/ 0, &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}
// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}
// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  CHECK_EQ(0, frontier.size());
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}
static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}
// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}
static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}
struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}
static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}
void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  bool have_leaks = CheckForLeaks();
  if (!have_leaks) {
    return;
  }
  if (common_flags()->exitcode) {
    Die();
  }
}
static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}
static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}
static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}
///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;
void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}
void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}
void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}
void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}
void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}
void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                           leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}
uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
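
// Hypothetical caller-side usage (illustrative only, not part of this file):
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // p is excluded from future leak reports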
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}
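
// Hypothetical caller-side usage (illustrative only): treat a custom-mapped
// arena as a root for reachability, then drop it when the arena is released
// ("arena_base" and "arena_size" are assumed names):
//   __lsan_register_root_region(arena_base, arena_size);
//   ...
//   __lsan_unregister_root_region(arena_base, arena_size);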
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
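
// Hypothetical caller-side usage (illustrative only): disable/enable calls
// must be balanced; allocations made between the pair are never reported:
//   __lsan_disable();
//   intentionally_leaky_init();  // hypothetical function
//   __lsan_enable();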
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}
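
// Hypothetical caller-side usage (illustrative only): check for leaks mid-run
// without terminating the process; a non-zero return means at least one
// unsuppressed leak was found:
//   if (__lsan_do_recoverable_leak_check())
//     HandleLeaksDetected();  // hypothetical function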
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
} // extern "C"