//=-- lsan_common.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

static void InitializeFlags(bool standalone) {
  Flags *f = flags();
  f->report_objects = false;
  f->use_registers = true;
  f->use_globals = true;
  f->use_root_regions = true;
  f->use_unaligned = false;
  f->use_poisoned = false;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  ParseFlag(options, &f->use_registers, "use_registers", "");
  ParseFlag(options, &f->use_globals, "use_globals", "");
  ParseFlag(options, &f->use_stacks, "use_stacks", "");
  ParseFlag(options, &f->use_tls, "use_tls", "");
  ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
  ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
  ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
  ParseFlag(options, &f->report_objects, "report_objects", "");
  ParseFlag(options, &f->resolution, "resolution", "");
  CHECK_GE(f->resolution, 0);
  ParseFlag(options, &f->max_leaks, "max_leaks", "");
  CHECK_GE(f->max_leaks, 0);
  ParseFlag(options, &f->log_pointers, "log_pointers", "");
  ParseFlag(options, &f->log_threads, "log_threads", "");
  ParseFlag(options, &f->exitcode, "exitcode", "");

  // Set defaults for common flags (only in standalone mode) and parse
  // them from LSAN_OPTIONS.
  CommonFlags *cf = common_flags();
  if (standalone) {
    SetCommonFlagsDefaults(cf);
    cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
    cf->malloc_context_size = 30;
    cf->detect_leaks = true;
  }
  ParseCommonFlagsFromString(cf, options);
}
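
// Example (illustrative, not from the original source): the flags above are
// read from the LSAN_OPTIONS environment variable as a colon-separated list of
// name=value pairs, e.g.
//   LSAN_OPTIONS="report_objects=1:max_leaks=10:log_pointers=1" ./a.out
// Any flag name passed to ParseFlag() / ParseCommonFlagsFromString() above can
// appear in that list.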

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0);

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0);

static bool suppressions_inited = false;

void InitializeSuppressions() {
  CHECK(!suppressions_inited);
  SuppressionContext::InitIfNecessary();
  if (&__lsan_default_suppressions)
    SuppressionContext::Get()->Parse(__lsan_default_suppressions());
  suppressions_inited = true;
}
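
// Example (illustrative, not from the original source): suppressions are
// typically supplied via LSAN_OPTIONS=suppressions=<file>, where the file
// contains one pattern per line, e.g.
//   leak:libfoo.so
//   leak:MyClass::LeakyFunction
// GetSuppressionForAddr() below matches such patterns against the module, file
// and function names of the allocation stack.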

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan(bool standalone) {
  InitializeFlags(standalone);
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
}
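
// For illustration (not from the original source): on x86-64, canonical
// user-space addresses have the upper 17 bits clear, so a pointer such as
// 0x00007f0000001000 passes the check above, while a kernel-half address such
// as 0xffff800000000000 is rejected.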

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
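
// Illustrative calls (not from the original source) showing the two usage
// modes described above:
//   ScanRangeForPointers(stack_begin, stack_end, &frontier, "STACK",
//                        kReachable);
//   ScanRangeForPointers(chunk, chunk + size, /*frontier*/ 0, "HEAP",
//                        kIndirectlyLeaked);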

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
                        &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// |frontier|.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  LOG_POINTERS("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  SuppressionContext::Get()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  param->success = true;
}

void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    if (flags()->exitcode) {
      if (common_flags()->coverage)
        __sanitizer_cov_dump();
      internal__exit(flags()->exitcode);
    }
  }
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s;

  // Suppress by module name.
  const char *module_name;
  uptr module_offset;
  if (Symbolizer::GetOrInit()
          ->GetModuleNameAndOffsetForPC(addr, &module_name, &module_offset) &&
      SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
      addr, addr_frames.data(), kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
    if (SuppressionContext::Get()->Match(addr_frames[i].function,
                                         SuppressionLeak, &s) ||
        SuppressionContext::Get()->Match(addr_frames[i].file, SuppressionLeak,
                                         &s))
      return s;
  }
  return 0;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hashtable.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedBuffer<char> summary(kMaxSummaryLength);
  internal_snprintf(summary.data(), summary.size(),
                    "%zu byte(s) leaked in %zu allocation(s).", bytes,
                    allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
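
// Illustrative usage (not part of the original source): a program can exempt
// a known allocation from leak reports, e.g.
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // p will not be reported even if unreachable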

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}
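
// Illustrative usage (not part of the original source): treat a custom arena
// as a source of live pointers while it is in use, e.g.
//   static char arena[1 << 20];
//   __lsan_register_root_region(arena, sizeof(arena));
//   ...
//   __lsan_unregister_root_region(arena, sizeof(arena));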

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
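
// Illustrative usage (not part of the original source): allocations made while
// the disable counter is non-zero are tagged as ignored and excluded from
// reports, e.g.
//   __lsan_disable();
//   void *cache = malloc(4096);  // intentionally kept unreachable
//   __lsan_enable();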

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {