//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"

namespace __asan {
struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since asan's mapping is compacting, the shadow chunk may be
    // not page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};
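
// Parameters of the allocator's virtual address region. On 64-bit targets the
// primary allocator serves chunks out of a single fixed range; on 32-bit it
// covers the whole 4G address space with a compact size-class map.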
#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize  = 0x20000000000ULL;  // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  = 0x40000000000ULL;  // 4T.
#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
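
// The combined Allocator dispatches small requests to the size-class-based
// primary allocator (served from per-thread AllocatorCaches) and large
// requests to the mmap-based secondary allocator.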

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache*>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
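
// Example: rz_log values 0..7 map to redzone sizes 16, 32, 64, ..., 2048
// bytes, so RZSize2Log(64) == 2 and RZLog2Size(2) == 64.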

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}
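
// Example: a 100-byte request is not <= 96 but is <= 448, so rz_log is 2 and
// the redzone is 64 bytes (assuming flags()->redzone is not larger than 64).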

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L  H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;
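
// GetAsanChunkByAddr() below recognizes such blocks by testing the first word
// for kMemalignMagic and then loading the ChunkHeader address from the second
// word; QuarantineCallback::Recycle() checks the same two words before
// returning the block to the allocator.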

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  // 2-nd 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

struct AsanChunk : ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_GE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};
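
// The compressed malloc stack lives in the otherwise unused part of the left
// redzone (between the redzone start and the ChunkHeader); the free stack is
// written over the user memory, which is safe because the chunk is already in
// quarantine when it is stored.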

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
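
// The quarantine delays reuse of freed chunks so that accesses to recently
// freed memory are reported as use-after-free instead of hitting a fresh
// allocation. The fallback cache/mutex are used whenever GetCurrentThread()
// returns null.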

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (m->from_memalign) {
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};
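
// Recycle() is called by the quarantine when a chunk is evicted: the chunk is
// marked available, re-poisoned as heap-left-redzone memory, and the
// underlying block is returned to the allocator through the owning cache.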

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  Flags &fl = *flags();
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(size, alignment);
  if (rounded_size < kChunkHeader2Size)
    rounded_size = kChunkHeader2Size;
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
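  // Example: with the default redzone flag (16) and SHADOW_GRANULARITY == 8,
  // malloc(100) gives rz_log == 2, rz_size == 64, rounded_size == 104 and
  // needed_size == 168; such a chunk stays in the primary allocator.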
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  // Clear the first allocated word (an old kMemalignMagic may still be there).
  reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (fl.use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }
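  // Example of the partial-granule encoding above: for size == 13 the first
  // granule (8 bytes) is fully unpoisoned and the shadow byte of the second
  // granule is set to 13 & 7 == 5, i.e. only its first 5 bytes are addressable.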

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;
  ASAN_FREE_HOOK(ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_relaxed)) {
    if (old_chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
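
  // Only one thread can win the compare-exchange above, so concurrent frees of
  // the same pointer are reliably diagnosed as a double-free rather than
  // racing to put the chunk into quarantine twice.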

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK_EQ(m->chunk_state, CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    CHECK_NE(REAL(memcpy), (void*)0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *ptr = reinterpret_cast<void *>(p);
  uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  if (!allocator.FromPrimary(ptr)) {
    uptr *meta = reinterpret_cast<uptr *>(
        allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
  CHECK_LE(actual_size, SizeClassMap::kMaxSize);
  // We know the actually allocated size, but we don't know the redzone size.
  // Just try all possible redzone sizes.
  for (u32 rz_log = 0; rz_log < 8; rz_log++) {
    u32 rz_size = RZLog2Size(rz_log);
    uptr max_possible_size = actual_size - rz_size;
    if (ComputeRZLog(max_possible_size) != rz_log)
      continue;
    return reinterpret_cast<AsanChunk *>(
        alloc_beg + rz_size - kChunkHeaderSize);
  }
  return 0;
}
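
// Three lookup strategies are used above: memalign'ed blocks carry an explicit
// magic word pointing at the header, secondary (large) blocks keep the header
// address in the allocator metadata, and for primary blocks the header is
// re-derived by guessing the redzone size that ComputeRZLog would have chosen.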

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over freed chunk and freed chunk
  // over available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator no need to clear it
  // as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif