//===-- asan_allocator2.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"

namespace __asan {
struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since asan's mapping is compacting, the shadow chunk may not be
    // page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};
#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
const uptr kAllocatorSize  = 0x10000000000ULL;  // 1T.
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
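// The ASan heap is a CombinedAllocator: small requests go to the size-class
// PrimaryAllocator, anything it cannot handle falls through to the mmap-based
// SecondaryAllocator, and AllocatorCache gives each thread a local free list.
// AsanMapUnmapCallback poisons every region the allocator maps and releases
// the corresponding shadow when it unmaps.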
// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
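// State transitions: CHUNK_ALLOCATED is set in Allocate(), flipped to
// CHUNK_QUARANTINE in Deallocate(), and reset to CHUNK_AVAILABLE only when
// the quarantine recycles the chunk in QuarantineCallback::Recycle().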
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: larger allocations get larger redzones.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}
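// For example, a 1000-byte request satisfies the "<= 4096 - 128" clause, so
// rz_log is 3 and the redzone is RZLog2Size(3) == 128 bytes, unless the
// flags()->redzone setting asks for an even larger minimum.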
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L  H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;
struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
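// The 16-byte ChunkHeader occupies the tail of the left redzone (the minimum
// redzone is exactly 16 bytes), so Beg() == header address + 16 is the start
// of user memory. Header2 overlaps user memory; Allocate() rounds the user
// size up to at least kChunkHeader2Size so the overlap always fits inside the
// chunk.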
struct AsanChunk : ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_GE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};
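// Note that both stack traces live inside the chunk itself: the (compressed)
// allocation stack goes into the left redzone just before the header, and the
// free stack is written over the user memory once the chunk is quarantined.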
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}
void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}
struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
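// Freed chunks are not returned to the allocator right away: they sit in the
// quarantine (bounded by flags()->quarantine_size) so that accesses to
// recently freed memory are still reported as use-after-free. The fallback
// cache and mutex are used on threads that have no AsanThread object yet.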
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (m->from_memalign) {
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};
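// Recycle() is the only place where memory actually goes back to the
// underlying allocator. The Allocate()/Deallocate() members are not for user
// data; the Quarantine implementation calls them for its own internal
// bookkeeping (e.g. batch storage).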
void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type) {
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(size, alignment);
  if (rounded_size < kChunkHeader2Size)
    rounded_size = kChunkHeader2Size;
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }
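  // Sizing recap: the block always carries a left redzone of rz_size bytes
  // (which also holds the ChunkHeader); when the caller asks for an alignment
  // above SHADOW_GRANULARITY we over-allocate by that alignment so an aligned
  // user_beg is guaranteed to exist inside the block, and secondary (mmap)
  // allocations get an explicit right redzone appended.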
  AsanThread *t = asanThreadRegistry().GetCurrent();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  // Clear the first allocated word (an old kMemalignMagic may still be there).
  reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
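  // The two magic words let GetAsanChunkByAddr() find the real header when
  // user_beg had to be shifted right for alignment: the block begin no longer
  // sits a fixed rz_size before the header, so we leave a breadcrumb there.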
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }
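  // For secondary allocations the 32-bit user_requested_size field holds the
  // sentinel SizeClassMap::kMaxSize; the real size lives in the secondary
  // allocator's metadata (meta[0]), and meta[1] points back to the header so
  // the chunk can be recovered from the block address.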
  if (flags()->use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }
  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }
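  // Standard ASan shadow encoding for the last, partially addressable granule:
  // the shadow byte holds the number of addressable bytes (size mod
  // SHADOW_GRANULARITY); an access past that offset will be flagged.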
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;
  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}
static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_relaxed);
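  // This works because chunk_state is the first byte of the header: of two
  // racing frees exactly one observes CHUNK_ALLOCATED here, the other sees
  // CHUNK_QUARANTINE and is reported as a double free below.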
  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);
  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();
  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}
static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}
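// GetAsanChunkByAddr() maps an arbitrary heap address back to its AsanChunk.
// Three cases: blocks carrying the memalign magic, secondary (mmap) blocks
// whose metadata stores the header address, and ordinary primary blocks whose
// redzone size has to be re-derived.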
static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *ptr = reinterpret_cast<void *>(p);
  uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  if (!allocator.FromPrimary(ptr)) {
    uptr *meta = reinterpret_cast<uptr *>(
        allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
  CHECK_LE(actual_size, SizeClassMap::kMaxSize);
  // We know the actually allocated size, but we don't know the redzone size.
  // Just try all possible redzone sizes.
  for (u32 rz_log = 0; rz_log < 8; rz_log++) {
    u32 rz_size = RZLog2Size(rz_log);
    uptr max_possible_size = actual_size - rz_size;
    if (ComputeRZLog(max_possible_size) != rz_log)
      continue;
    return reinterpret_cast<AsanChunk *>(
        alloc_beg + rz_size - kChunkHeaderSize);
  }
  return 0;
}
static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk and a freed chunk
  // over an available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}
AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}
void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}
void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC);
}
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}
void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}
void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC);
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}
uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif  // !SANITIZER_SUPPORTS_WEAK_HOOKS

#endif  // ASAN_ALLOCATOR_VERSION