//=-- lsan_common.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->suppressions = "";
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
    ParseFlag(options, &f->suppressions, "suppressions");
  }
}
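
// A minimal usage sketch (assumed typical invocation, not part of this file):
// the flags above are read from the LSAN_OPTIONS environment variable as a
// colon-separated list of name=value pairs, e.g.
//   LSAN_OPTIONS="report_objects=1:max_leaks=5:suppressions=my_supp.txt" ./app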

SuppressionContext *suppression_ctx;

void InitializeSuppressions() {
  CHECK(!suppression_ctx);
  ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
  suppression_ctx = new(placeholder_) SuppressionContext;
  char *suppressions_from_file;
  uptr buffer_size;
  if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
                       &buffer_size, 1 << 26 /* max_len */))
    suppression_ctx->Parse(suppressions_from_file);
  if (flags()->suppressions[0] && !buffer_size) {
    Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
           flags()->suppressions);
    Die();
  }
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}
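
// Sketch of the expected suppressions file contents (an assumption based on
// the SuppressionLeak matching in GetSuppressionForAddr below, which matches
// each pattern against function, source file and module names):
//   leak:MyLeakyFunction
//   leak:third_party/leaky_lib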

void InitCommonLsan() {
  InitializeFlags();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: private __sanitizer::AnsiColorDecorator {
 public:
  Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}
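
// For reference: on x86_64, canonical user-space addresses fit in the low 47
// bits, so (p >> 47) == 0 accepts e.g. 0x00007fffffffe000 (a typical stack
// address) and rejects kernel-half addresses such as 0xffff800000000000.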

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
             chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}
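
// FloodFillTag() below drains the frontier as a worklist: each popped chunk is
// scanned for pointers, and ScanRangeForPointers() pushes any newly tagged
// chunks back onto the frontier, so the tag propagates transitively from the
// initial roots until no new chunks are discovered.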
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  if (flags()->log_pointers)
    Report("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}
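
// To recap the classification above: chunks referenced from the roots (global
// regions, thread registers, stacks and TLS) are flood-filled as kReachable;
// chunks referenced from platform-specific allocations are likewise marked
// kReachable; ignored chunks and everything reachable from them become
// kIgnored; finally, any chunk that is still considered leaked but is
// referenced from another leaked chunk is marked kIndirectlyLeaked.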

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size);
}

// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
    } else {
      leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

// ForEachChunk callback. Prints addresses of unreachable chunks.
static void PrintLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %zu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), chunk);
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  suppression_ctx->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb, 0 /* arg */);
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  uptr have_unsuppressed = param.leak_report.ApplySuppressions();
  if (have_unsuppressed) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.PrintLargest(flags()->max_leaks);
  }
  if (have_unsuppressed || (flags()->verbosity >= 1)) {
    PrintMatchedSuppressions();
    param.leak_report.PrintSummary();
  }
  if (have_unsuppressed && flags()->exitcode)
    internal__exit(flags()->exitcode);
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
      addr, addr_frames.data(), kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
    Suppression* s;
    if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
      return s;
  }
  return 0;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  for (uptr i = 0; i < size; i++) {
    Suppression *s =
        GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked, /* is_suppressed */ false };
  leaks_.push_back(leak);
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::PrintLargest(uptr num_leaks_to_print) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) unsuppressed_count++;
  if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
    Printf("The %zu largest leak(s):\n", num_leaks_to_print);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_printed = 0;
  Decorator d;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    Printf("%s", d.Leak());
    Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    Printf("%s", d.End());
    PrintStackTraceById(leaks_[i].stack_trace_id);
    leaks_printed++;
    if (leaks_printed == num_leaks_to_print) break;
  }
  if (leaks_printed < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_printed;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedBuffer<char> summary(kMaxSummaryLength);
  internal_snprintf(summary.data(), summary.size(),
                    "%zu byte(s) leaked in %zu allocation(s).", bytes,
                    allocations);
  ReportErrorSummary(summary.data());
}
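
// For example, with two unsuppressed leaks of 16 and 8 bytes the snprintf
// above yields a summary line of the form (any surrounding decoration is
// added by ReportErrorSummary):
//   24 byte(s) leaked in 2 allocation(s).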

uptr LeakReport::ApplySuppressions() {
  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    } else {
      unsuppressed_count++;
    }
  }
  return unsuppressed_count;
}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT
538 extern "C" {
539 SANITIZER_INTERFACE_ATTRIBUTE
540 void __lsan_ignore_object(const void *p) {
541 #if CAN_SANITIZE_LEAKS
542 if (!common_flags()->detect_leaks)
543 return;
544 // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
545 // locked.
546 BlockingMutexLock l(&global_mutex);
547 IgnoreObjectResult res = IgnoreObjectLocked(p);
548 if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
549 Report("__lsan_ignore_object(): no heap object found at %p", p);
550 if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
551 Report("__lsan_ignore_object(): "
552 "heap object at %p is already being ignored\n", p);
553 if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
554 Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
555 #endif // CAN_SANITIZE_LEAKS
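
// A minimal usage sketch (illustrative, not from this file): a client that
// keeps an allocation alive only through a disguised pointer can exclude it
// from leak reports up front.
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // p, and anything reachable from it, is ignored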

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
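
// Usage sketch (illustrative): disable_counter is THREADLOCAL, so the calls
// are expected to be paired within a single thread.
//   __lsan_disable();
//   void *scratch = malloc(32);  // allocations made here are not reported
//   __lsan_enable();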

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"