//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
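
// Per-chunk bookkeeping kept in the allocator's metadata region. The first
// three fields share a single 64-bit word; 'allocated' occupies the leading
// byte so it can be flipped with an atomic store (see RegisterAllocation
// below).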
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};
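
// The primary allocator serves size-classed chunks out of the fixed 4T region
// above and reserves room for one ChunkMetadata record per chunk; oversized
// requests fall through to the mmap-based secondary allocator. Each thread
// owns a local AllocatorCache.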
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
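
// Record (or erase) the bookkeeping for a chunk. The 'allocated' flag lives
// in the leading byte of ChunkMetadata, so it is updated with an atomic store
// rather than through the bit-field, keeping concurrent readers of the flag
// well-defined.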
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
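
// Zero-byte requests are rounded up to one byte; requests above
// kMaxAllowedMallocSize are rejected with a warning and return 0. Memory is
// cleared here only for primary chunks: the secondary allocator hands out
// freshly mmapped pages, which are already zero.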
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  return p;
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}
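
// Reallocation drops the old registration, resizes through the allocator and
// registers the chunk again under the new size. An oversized request frees
// the original block and returns 0.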
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}
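
// Report the address range occupied by this thread's allocator cache. The
// common LSan code uses it when scanning thread memory, so that free chunks
// sitting in the cache are not mistaken for live references.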
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}
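
// Map an arbitrary address to the beginning of the chunk it points into, or
// return 0 if it does not point into a live chunk. Addresses past the
// requested size are rejected, except for the zero-sized operator new special
// case handled by IsSpecialCaseOfOperatorNew0.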
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}
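
// LsanMetadata is the accessor type the common LSan module uses to inspect a
// chunk; here each method simply forwards to the ChunkMetadata record kept by
// the allocator.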
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}
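
// Mark the chunk containing p as ignored so the leak checker will not report
// it. Returns an error code if p does not point into a live chunk, and a
// distinct code if the chunk was already ignored.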
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}

}  // namespace __lsan