//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

// memset() is declared directly instead of pulling in <string.h>; the
// sanitizer runtime avoids depending on libc headers.
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

static const uptr kMaxAllowedMallocSize = 8UL << 30;  // 8 GiB.
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize  =  0x40000000000ULL;  // 4T.

// Per-chunk bookkeeping, stored in the allocator's metadata region. The
// 'allocated' flag occupies the whole first byte so it can be read and
// written atomically as an atomic_uint8_t (see Register* below).
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

// Small allocations are served from a fixed 4 TiB region at kAllocatorSpace,
// carved into size classes (PrimaryAllocator); anything larger falls back to
// direct mmap (SecondaryAllocator). Each thread keeps a local free-list cache.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
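
// Dispatch sketch (illustrative assumption; the real logic is
// CombinedAllocator::Allocate() in sanitizer_common/sanitizer_allocator.h):
// requests that fit a primary size class are served from the per-thread
// cache, everything else is mmap'ed by the secondary allocator:
//
//   bool from_primary = primary_.CanAllocate(size, alignment);
//   void *res = from_primary
//                   ? cache->Allocate(&primary_, primary_.ClassID(size))
//                   : secondary_.Allocate(&stats_, size, alignment);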

void InitializeAllocator() {
  allocator.Init();
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  // The 'allocated' byte is flipped last, after the rest of the metadata is
  // filled in. A relaxed store suffices because the leak scanner only reads
  // chunk metadata while all threads are suspended (stop-the-world).
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
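
// Consumer-side sketch (an assumption about lsan_common.cc, which does the
// actual scanning): with all threads suspended, a ForEachChunk() callback can
// trust the 'allocated' byte written above, e.g.:
//
//   static void ExampleChunkCallback(uptr chunk, void *arg) {  // hypothetical
//     LsanMetadata m(chunk);
//     if (m.allocated() && m.tag() == kDirectlyLeaked) {
//       // ... record the chunk as a potential leak ...
//     }
//   }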

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
  // Secondary chunks come straight from mmap and are already zeroed.
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  // __sanitizer_malloc_hook is a weak symbol; call it only if the user
  // defined one.
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  return p;
}
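
// Usage sketch: the public entry points are the interceptors in
// lsan_interceptors.cc, which capture the caller's stack and delegate here,
// roughly like this (simplified; macro names from that file):
//
//   INTERCEPTOR(void*, malloc, uptr size) {
//     ENSURE_LSAN_INITED;
//     GET_STACK_TRACE_MALLOC;
//     return Allocate(stack, size, 1, kAlwaysClearMemory);
//   }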

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

// Exposes the per-thread cache range so the scanner can skip it: the cache
// holds pointers to freed chunks, which must not look like live references.
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

// Like GetAllocatorCacheRange(), but for the global allocator object itself;
// the scanner excludes this range when scanning global memory.
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}
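
// Example of the operator-new-with-zero-size edge case handled above
// (illustrative; the exact condition lives in IsSpecialCaseOfOperatorNew0()):
//
//   struct S { ~S(); };
//   S *p = new S[0];
//
// Here the returned pointer may point right past the end of the chunk, yet
// it is the only reference the program holds, so it must still pin the chunk.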

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
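
// Usage sketch (assumption: this backs the public __lsan_ignore_object()
// from <sanitizer/lsan_interface.h>, via lsan_common.cc):
//
//   #include <sanitizer/lsan_interface.h>
//   void *leak_on_purpose = malloc(16);
//   __lsan_ignore_object(leak_on_purpose);  // tagged kIgnored, not reported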

} // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
} // extern "C"
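
// Client-side usage sketch (not part of this file; these entry points are
// declared in compiler-rt's <sanitizer/allocator_interface.h>):
//
//   #include <sanitizer/allocator_interface.h>
//   #include <stdio.h>
//   #include <stdlib.h>
//
//   int main() {
//     void *p = malloc(64);
//     printf("owned=%d size=%zu heap=%zu\n",
//            __sanitizer_get_ownership(p),
//            __sanitizer_get_allocated_size(p),  // reports the requested size
//            __sanitizer_get_heap_size());
//     free(p);
//     return 0;
//   }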