//=-- lsan_common_linux.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Linux-specific code.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#include "lsan_common.h"

#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX
#include <link.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __lsan {

// Base name of the dynamic loader. LibraryNameIs() matches it against a
// module's file name, so e.g. "ld-2.24.so" and "ld-linux-x86-64.so.2" both
// qualify.
static const char kLinkerName[] = "ld";

static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = nullptr;

static bool IsLinker(const char* full_name) {
  return LibraryNameIs(full_name, kLinkerName);
}

void InitializePlatformSpecificModules() {
  ListOfModules modules;
  modules.init();
  for (LoadedModule &module : modules) {
    if (!IsLinker(module.full_name())) continue;
    if (linker == nullptr) {
      // Take ownership of the module: copy it into the static placeholder
      // and reset the original, so the ranges it owns are not freed when
      // `modules` goes out of scope.
      linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
      *linker = module;
      module = LoadedModule();
    } else {
      VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
              "TLS will not be handled correctly.\n", kLinkerName);
      linker->clear();
      linker = nullptr;
      return;
    }
  }
  // Warn only if no module matched; otherwise the linker has been found.
  if (linker == nullptr)
    VReport(1, "LeakSanitizer: Dynamic linker not found. "
            "TLS will not be handled correctly.\n");
}

static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
                                        void *data) {
  Frontier *frontier = reinterpret_cast<Frontier *>(data);
  for (uptr j = 0; j < info->dlpi_phnum; j++) {
    const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
    // We're looking for .data and .bss sections, which reside in writeable,
    // loadable segments.
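    // (For instance, a PT_LOAD segment with p_flags PF_R | PF_W typically
    // maps .data and .bss; read-only and executable segments are skipped.)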
    if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
        (phdr->p_memsz == 0))
      continue;
    // dlpi_addr is the module's load base; p_vaddr is the segment's
    // link-time virtual address relative to that base.
    uptr begin = info->dlpi_addr + phdr->p_vaddr;
    uptr end = begin + phdr->p_memsz;
    uptr allocator_begin = 0, allocator_end = 0;
    GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
    // If the allocator's own globals fall inside this segment, scan around
    // them instead of through them.
    if (begin <= allocator_begin && allocator_begin < end) {
      CHECK_LE(allocator_begin, allocator_end);
      CHECK_LE(allocator_end, end);
      if (begin < allocator_begin)
        ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                             kReachable);
      if (allocator_end < end)
        ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
                             kReachable);
    } else {
      ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
    }
  }
  return 0;
}

// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
  if (!flags()->use_globals) return;
  dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
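  // For example, for a chunk allocated via "f() -> malloc()", the stored
  // trace is roughly { <PC in the malloc interceptor>, <PC in f>, ... }, so
  // trace[1] is the caller's PC. (Illustrative; exact frames depend on the
  // interceptor and the unwinder.)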
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct ProcessPlatformAllocParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. Identifies unreachable chunks which must be treated
// as reachable. Marks them as reachable and adds them to the frontier.
static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
  CHECK(arg);
  ProcessPlatformAllocParam *param =
      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine.
    // Mark it as reachable, as we can't properly report its allocation stack
    // anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           linker->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by
// intercepting __libc_memalign(). On top of that, there is no easy way to
// reach them. Their addresses are stored in a dynamically allocated array
// (the DTV) which is referenced from the static TLS. Unfortunately, we can't
// just rely on the DTV being reachable from the static TLS, and the dynamic
// TLS being reachable from the DTV. This is because the initial DTV is
// allocated before our interception mechanism kicks in, and thus we don't
// recognize it as allocated memory. We can't special-case it either, since
// we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other
// allocations which we don't care about).
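//
// For illustration, the glibc side looks roughly like this (a simplified
// sketch of allocate_and_init() from elf/dl-tls.c; the real code differs
// across glibc versions):
//
//   void *allocate_and_init(struct link_map *map) {
//     void *obj = __libc_memalign(map->l_tls_align, map->l_tls_blocksize);
//     // ... record obj in the DTV, copy in the TLS initialization image ...
//     return obj;
//   }
//
// Since __libc_memalign() is intercepted, obj is a chunk in our allocator,
// and its allocation PC lies inside ld-linux.so; that is exactly what the
// skip_linker_allocations logic below detects.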
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  ProcessPlatformAllocParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  // Allocations made by the linker are treated as reachable only when TLS
  // handling and the ld-allocations workaround are both enabled and the
  // linker module was actually found.
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}

struct DoStopTheWorldParam {
  StopTheWorldCallback callback;
  void *argument;
};

static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
                                  void *data) {
  DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
  StopTheWorld(param->callback, param->argument);
  // A non-zero return value stops the iteration after the first module; we
  // only call dl_iterate_phdr() to hold its lock, not to walk the list.
  return 1;
}

// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if
// one of the threads is frozen while holding the libdl lock, the tracer will
// hang in dl_iterate_phdr() forever.
// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between
// the tracer task and the thread that spawned it. Thus, if we run the tracer
// task while holding the libdl lock in the parent thread, we can safely
// reenter it in the tracer. The solution is to run stoptheworld from a
// dl_iterate_phdr() callback in the parent thread.
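//
// Deadlock sketch without this workaround (illustrative):
//   1. Thread T calls dlopen() and acquires the libdl lock.
//   2. The tracer freezes all threads, including T.
//   3. The tracer calls dl_iterate_phdr() and blocks on the lock T holds.
// With the workaround, the parent thread already holds the lock when the
// tracer starts, and because of (a) and (b) above the tracer's reentry
// succeeds instead of blocking.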
void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
  DoStopTheWorldParam param = {callback, argument};
  dl_iterate_phdr(DoStopTheWorldCallback, &param);
}

} // namespace __lsan

#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX