//===-- asan_allocator2.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since asan's mapping is compacting, the shadow chunk may not be
    // page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize  = 0x20000000000ULL;  // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  = 0x40000000000ULL;  // 4T.
#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  SizeClassMap, kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize>,
  AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
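
// The CombinedAllocator dispatches between the two back-ends: the size-class
// based PrimaryAllocator serves common small and medium requests out of
// per-thread AllocatorCaches, while the mmap-based SecondaryAllocator handles
// whatever the primary cannot (very large or unusually aligned blocks).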

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);
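
// In human-readable terms: the largest single allocation ASan will attempt is
// 3 GB on 32-bit and 8 GB on 64-bit targets, and each thread may accumulate
// up to 256 KB (32-bit) or 1 MB (64-bit) of freed memory in its local
// quarantine cache before it is flushed to the global quarantine.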

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}
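
// Worked example: a 60-byte request satisfies "user_requested_size <= 128 - 32",
// so rz_log == 1 and RZLog2Size(1) == 32; the chunk gets a 32-byte redzone,
// unless flags()->redzone asks for something larger.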

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size, we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
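
// Sanity check on the layout: chunk_state(8) + alloc_tid(24) fill the first
// 32-bit word; free_tid(24) + from_memalign(1) + alloc_type(2) + rz_log(3) +
// lsan_tag(2) fill the second; user_requested_size and alloc_context_id add
// two more 32-bit words, giving the 16 bytes asserted by COMPILER_CHECK above.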

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // If we don't use stack depot, we store the alloc/free stack traces
  // in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
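
// The fallback allocator cache and quarantine cache are used (under
// fallback_mutex) whenever GetCurrentThread() returns null, e.g. for
// allocations made very early in process start-up or from threads that ASan
// has not (yet) registered.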

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};
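
// Freed chunks are not returned to the underlying allocator right away.
// The free path later in this file (QuarantineChunk) parks them in the
// quarantine; roughly speaking, once the quarantine grows past
// flags()->quarantine_size, the oldest chunks are handed to Recycle() above.
// Keeping freed memory poisoned for a while is what lets ASan catch
// use-after-free bugs with high probability.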

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
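
// Rough size accounting for a plain malloc(100), assuming the default redzone
// flag and 8-byte shadow granularity: ComputeRZLog(100) == 2, so rz_size == 64;
// rounded_size == RoundUpTo(100, 8) == 104 and needed_size == 168 bytes are
// requested from the primary allocator, with the 16-byte ChunkHeader living in
// the last bytes of the left redzone.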

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (!asan_inited)
    __asan_init();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return AllocatorReturnNull();
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (alloc_beg != chunk_beg) {
    CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (fl.use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
#if CAN_SANITIZE_LEAKS
  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                               : __lsan::kDirectlyLeaked;
#endif
  // Must be the last mutation of metadata in this function.
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
  if (chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else
    ReportFreeNotMalloced((uptr)ptr, stack);
}

static void AtomicallySetQuarantineFlag(AsanChunk *m,
                                        void *ptr, StackTrace *stack) {
  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_acquire))
    ReportInvalidFree(ptr, old_chunk_state, stack);
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}

// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr,
                            StackTrace *stack, AllocType alloc_type) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }

  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  ASAN_FREE_HOOK(ptr);
  // Must mark the chunk as quarantined before any changes to its metadata.
  AtomicallySetQuarantineFlag(m, ptr, stack);
  QuarantineChunk(m, ptr, stack, alloc_type);
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), (void*)0);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
static AsanChunk *GetAsanChunk(void *alloc_beg) {
  if (!alloc_beg) return 0;
  if (!allocator.FromPrimary(alloc_beg)) {
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (alloc_magic[0] == kAllocBegMagic)
    return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
  return reinterpret_cast<AsanChunk *>(alloc_beg);
}
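
// Three cases are handled above: secondary-allocator blocks keep the chunk
// address in their out-of-line metadata (meta[1]); primary blocks whose left
// redzone is larger than the header store it behind kAllocBegMagic; otherwise
// the ChunkHeader sits right at the beginning of the block.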

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *alloc_beg =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk, and a freed chunk
  // over an available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return AllocatorReturnNull();
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator, there is no need to
  // clear it as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::allocator.ForceLock();
}

void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::allocator;
  *end = *begin + sizeof(__asan::allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m =
      __asan::GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif