//===-- asan_allocator2.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
17 #include "asan_allocator.h"
19 #include "asan_mapping.h"
20 #include "asan_poisoning.h"
21 #include "asan_report.h"
22 #include "asan_stack.h"
23 #include "asan_thread.h"
24 #include "sanitizer_common/sanitizer_allocator.h"
25 #include "sanitizer_common/sanitizer_flags.h"
26 #include "sanitizer_common/sanitizer_internal_defs.h"
27 #include "sanitizer_common/sanitizer_list.h"
28 #include "sanitizer_common/sanitizer_stackdepot.h"
29 #include "sanitizer_common/sanitizer_quarantine.h"
30 #include "lsan/lsan_common.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    FlushUnneededASanShadowMemory(p, size);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_CAN_USE_ALLOCATOR64
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize  = 0x20000000000ULL;  // 2T.
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  = 0x40000000000ULL;  // 4T.
# endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#else  // Fallback to SizeClassAllocator32.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<kNumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
# endif
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
  SizeClassMap, kRegionSizeLog,
  ByteMap,
  AsanMapUnmapCallback> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 64UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
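
// Rough lifecycle, as implemented below: Allocate() publishes CHUNK_ALLOCATED
// with a release store as its last metadata write, Deallocate() flips
// CHUNK_ALLOCATED to CHUNK_QUARANTINE with an atomic compare-exchange (which
// also catches double-free), and QuarantineCallback::Recycle() eventually
// stores CHUNK_AVAILABLE before returning the block to the allocator.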

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
             RZSize2Log(flags()->max_redzone));
}

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;
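
// For example (illustrative): a memalign() request with a large alignment can
// push user_beg past alloc_beg + redzone, so the block start no longer sits
// kChunkHeaderSize bytes before the header. In that case Allocate() writes
// kAllocBegMagic and the header address into the first two words, and
// GetAsanChunk() uses them to find the header from the block start.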

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
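
// Size encoding example (illustrative): a 24-byte malloc served by the primary
// allocator stores 24 in user_requested_size, while a huge allocation served
// by the secondary allocator stores SizeClassMap::kMaxSize there and keeps the
// real size in the secondary allocator's metadata; AsanChunk::UsedSize()
// below reads it back from there.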

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               allocator.GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return allocator.GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // If we don't use stack depot, we store the alloc/free stack traces
  // in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};

bool AsanChunkView::IsValid() {
  return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  stack->CopyFrom(trace, size);
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  GetStackTraceFromId(chunk_->alloc_context_id, stack);
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  GetStackTraceFromId(chunk_->free_context_id, stack);
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};
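
// Descriptive note: freed chunks are not returned to the underlying allocator
// right away. Deallocate() below puts them into `quarantine`; once the
// quarantine grows past its configured size (flags()->quarantine_size), it
// calls Recycle() above, which marks the chunk CHUNK_AVAILABLE, re-poisons it
// as heap redzone and only then releases the block via allocator.Deallocate().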

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

void ReInitializeAllocator() {
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  Flags &fl = *flags();
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
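  // Worked example (illustrative): for malloc(100) with the default 8-byte
  // SHADOW_GRANULARITY, alignment is raised to 8, rz_log is 2 (64-byte
  // redzone), rounded_size is RoundUpTo(100, 8) == 104 and needed_size is
  // 168; such a request is served by the primary allocator, so no extra
  // right redzone is added here.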
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return AllocatorReturnNull();
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }

  if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
    // Heap poisoning is enabled, but the allocator provides an unpoisoned
    // chunk. This is possible if flags()->poison_heap was disabled for some
    // time, for example, due to flags()->start_disabled.
    // Anyway, poison the block before using it for anything else.
    uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
    PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (alloc_beg != chunk_beg) {
    CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  m->alloc_context_id = StackDepotPut(stack->trace, stack->size);

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
  }
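
  // Illustrative case: for size == 13 with SHADOW_GRANULARITY == 8, the first
  // 8 bytes get a zero shadow byte, and (when poison_partial is on) the
  // trailing shadow byte is set to 5 == 13 & 7, so only the first 5 bytes of
  // the last granule are addressable.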

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
#if CAN_SANITIZE_LEAKS
  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                               : __lsan::kDirectlyLeaked;
#endif
  // Must be the last mutation of metadata in this function.
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
  if (chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else
    ReportFreeNotMalloced((uptr)ptr, stack);
}

static void AtomicallySetQuarantineFlag(AsanChunk *m,
                                        void *ptr, StackTrace *stack) {
  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_acquire))
    ReportInvalidFree(ptr, old_chunk_state, stack);
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}

// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr,
                            StackTrace *stack, AllocType alloc_type) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  m->free_context_id = StackDepotPut(stack->trace, stack->size);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  ASAN_FREE_HOOK(ptr);
  // Must mark the chunk as quarantined before any changes to its metadata.
  AtomicallySetQuarantineFlag(m, ptr, stack);
  QuarantineChunk(m, ptr, stack, alloc_type);
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), (void*)0);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}
539 static AsanChunk
*GetAsanChunk(void *alloc_beg
) {
540 if (!alloc_beg
) return 0;
541 if (!allocator
.FromPrimary(alloc_beg
)) {
542 uptr
*meta
= reinterpret_cast<uptr
*>(allocator
.GetMetaData(alloc_beg
));
543 AsanChunk
*m
= reinterpret_cast<AsanChunk
*>(meta
[1]);
546 uptr
*alloc_magic
= reinterpret_cast<uptr
*>(alloc_beg
);
547 if (alloc_magic
[0] == kAllocBegMagic
)
548 return reinterpret_cast<AsanChunk
*>(alloc_magic
[1]);
549 return reinterpret_cast<AsanChunk
*>(alloc_beg
);

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *alloc_beg =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over freed chunk and freed chunk
  // over available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return AllocatorReturnNull();
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator no need to clear it
  // as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::allocator.ForceLock();
}

void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::allocator;
  *end = *begin + sizeof(__asan::allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m =
      __asan::GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif