//=-- lsan_common.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//
13 #include "lsan_common.h"
15 #include "sanitizer_common/sanitizer_common.h"
16 #include "sanitizer_common/sanitizer_flags.h"
17 #include "sanitizer_common/sanitizer_flag_parser.h"
18 #include "sanitizer_common/sanitizer_placement_new.h"
19 #include "sanitizer_common/sanitizer_procmaps.h"
20 #include "sanitizer_common/sanitizer_stackdepot.h"
21 #include "sanitizer_common/sanitizer_stacktrace.h"
22 #include "sanitizer_common/sanitizer_suppressions.h"
23 #include "sanitizer_common/sanitizer_report_decorator.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
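
// The counter above is driven by the __lsan_disable()/__lsan_enable() entry
// points at the bottom of this file; allocations made while it is positive
// are treated as ignored by the leak checker.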

Flags lsan_flags;

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0);

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0);
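
// (The do { ... } while (0) wrapper makes each macro expand to a single
// statement, so the logging calls compose safely with unbraced if/else.)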

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}
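
// A suppressions file consists of "type:pattern" lines; for LSan the type is
// "leak". Illustrative examples (not part of this file):
//   leak:libc.so
//   leak:MyLeakyFunction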

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
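        // Scan the two pieces of the static TLS image that lie outside the
        // allocator cache.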
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ nullptr, /*filename*/ nullptr,
                        /*filename_size*/ 0, &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}
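
// Worklist-based flood fill: pop a chunk, scan it for pointers, and let
// ScanRangeForPointers() push newly discovered chunks back onto |frontier|,
// so everything transitively reachable from the initial frontier gets |tag|.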
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  CHECK_EQ(0, frontier.size());
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
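    // With a nonzero |resolution|, re-intern a truncated copy of the stack so
    // that leaks sharing the same top |resolution| frames are merged into one
    // report.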
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
420 "================================================================="
422 Printf("%s", d
.Error());
423 Report("ERROR: LeakSanitizer: detected memory leaks\n");
424 Printf("%s", d
.End());
425 param
.leak_report
.ReportTopLeaks(flags()->max_leaks
);
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  bool have_leaks = CheckForLeaks();
  if (!have_leaks) {
    return;
  }
  if (common_flags()->exitcode) {
    Die();
  }
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
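  // Linear scan over the existing leaks; kMaxLeaksConsidered bounds the
  // quadratic worst case (see the FIXME above).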
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                                          leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
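
// Illustrative use from client code: annotate an intentionally leaked object
// so it is not reported, e.g.
//   __lsan_ignore_object(intentionally_leaked_ptr);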

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}
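
// Illustrative pairing (the pointer and size must match the registration):
//   __lsan_register_root_region(mapping, mapping_size);
//   ...
//   __lsan_unregister_root_region(mapping, mapping_size);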

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    VReport(1,
668 "__lsan_unregister_root_region(): region at %p of size %llu has not "
669 "been registered.\n",
673 #endif // CAN_SANITIZE_LEAKS

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {