//===-- asan_allocator.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned by REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1 bytes).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to a quarantine (fifo list).
// After quarantine, a chunk is returned to the freelists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
//
//===----------------------------------------------------------------------===//
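// A worked example (illustrative only, assuming REDZONE == 128): malloc(20)
// produces a 256-byte chunk laid out as
//   [ left redzone: 128 bytes | user data: 20 bytes | poisoned tail: 108 bytes ]
// so the chunk ends on a REDZONE boundary and the right redzone stays within
// the 0..REDZONE-1 range described above.
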
#include "asan_allocator.h"

#if ASAN_ALLOCATOR_VERSION == 1
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {

#define REDZONE ((uptr)(flags()->redzone))
static const uptr kMinAllocSize = REDZONE * 2;
static const u64 kMaxAvailableRam = 128ULL << 30;  // 128G
static const uptr kMaxThreadLocalQuarantine = 1 << 20;  // 1M

static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
static const uptr kMaxSizeForThreadLocalFreeList =
    (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;

// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const uptr kMallocSizeClassStepLog = 26;
static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

static const uptr kMaxAllowedMallocSize =
    (SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
static inline uptr SizeClassToSize(u8 size_class) {
  CHECK(size_class < kNumberOfSizeClasses);
  if (size_class <= kMallocSizeClassStepLog) {
    return 1UL << size_class;
  }
  return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
}
static inline u8 SizeToSizeClass(uptr size) {
  u8 res = 0;
  if (size <= kMallocSizeClassStep) {
    uptr rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
        + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}
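// Illustrative mapping, derived from the two functions above (64M step):
//   SizeToSizeClass(1)       == 0   ->  SizeClassToSize(0)  == 1
//   SizeToSizeClass(100)     == 7   ->  SizeClassToSize(7)  == 128
//   SizeToSizeClass(1 << 20) == 20  ->  SizeClassToSize(20) == 1M
//   SizeToSizeClass(65M)     == 28  ->  SizeClassToSize(28) == 128M
// Small requests round up to a power of two; large requests round up to a
// multiple of kMallocSizeClassStep.
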
// Given REDZONE bytes, we need to mark the first 'size' bytes
// as addressable and the remaining REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}
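// For example (illustrative, assuming REDZONE == 128 and an 8-byte shadow
// granularity): a call with size == 20 leaves bytes [0, 20) of 'mem'
// addressable, makes the granule straddling offset 20 partially addressable,
// and poisons the rest up to offset 128 with kAsanHeapRightRedzoneMagic.
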
static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
  CHECK(IsAligned(size, GetPageSizeCached()));
  u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
  PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
  Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  return res;
}
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case the actual chunk resides at
// this - this->used_size).
//
// The magic numbers for the enum values are taken randomly.
enum {
  CHUNK_AVAILABLE  = 0x57,
  CHUNK_ALLOCATED  = 0x32,
  CHUNK_QUARANTINE = 0x19,
  CHUNK_MEMALIGN   = 0xDC
};
struct AsanChunk;

struct ChunkBase {
  uptr chunk_state   : 8;
  uptr alloc_tid     : 24;
  uptr size_class    : 8;
  uptr free_tid      : 24;

  uptr alignment_log : 8;
  uptr alloc_type    : 2;
  uptr used_size : FIRST_32_SECOND_64(32, 54);  // Size requested by the user.

  // This field may overlap with the user area and thus should not
  // be used while the chunk is in CHUNK_ALLOCATED state.
  AsanChunk *next;

  // Typically the beginning of the user-accessible memory is 'this'+REDZONE
  // and is also aligned by REDZONE. However, if the memory is allocated
  // by memalign, the alignment might be higher and the user-accessible memory
  // starts at the first properly aligned address after 'this'.
  uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); }
  uptr Size() { return SizeClassToSize(size_class); }
  u8 SizeClass() { return size_class; }
};
struct AsanChunk : public ChunkBase {
  u32 *compressed_alloc_stack() {
    return (u32*)((uptr)this + sizeof(ChunkBase));
  }
  u32 *compressed_free_stack() {
    return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase)));
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  uptr compressed_alloc_stack_size() {
    if (REDZONE < sizeof(ChunkBase)) return 0;
    return (REDZONE - sizeof(ChunkBase)) / sizeof(u32);
  }
  uptr compressed_free_stack_size() {
    if (REDZONE < sizeof(ChunkBase)) return 0;
    return REDZONE / sizeof(u32);
  }
};
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->used_size; }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->compressed_alloc_stack(),
                              chunk_->compressed_alloc_stack_size());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->compressed_free_stack(),
                              chunk_->compressed_free_stack_size());
}
static AsanChunk *PtrToChunk(uptr ptr) {
  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
  if (m->chunk_state == CHUNK_MEMALIGN) {
    m = (AsanChunk*)((uptr)m - m->used_size);
  }
  return m;
}
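// Illustrative example (not from this file): with REDZONE == 128, a
// memalign(2048, ...) request may hand out an address 'addr' that lies past
// 'm' + REDZONE. In that case Allocate() stores a pseudo-chunk header at
// addr - REDZONE whose chunk_state is CHUNK_MEMALIGN and whose used_size is
// the distance back to the real chunk, so PtrToChunk(addr) above first lands
// on the pseudo-chunk and then steps back to the real AsanChunk.
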
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  size_ += q->size();
  append_back(q);
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  push_back(n);
  size_ += n->Size();
}

// Interesting performance observation: this function takes up to 15% of overall
// allocator time. That's because *first_ was evicted from cache a long time
// ago. Not sure if we can or want to do anything with this.
AsanChunk *AsanChunkFifoList::Pop() {
  AsanChunk *res = front();
  size_ -= res->Size();
  pop_front();
  return res;
}
// All pages we ever allocated.
struct PageGroup {
  uptr beg;
  uptr end;
  uptr size_of_chunk;
  uptr last_chunk;
  bool InRange(uptr addr) {
    return addr >= beg && addr < end;
  }
};

class MallocInfo {
 public:
  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }
  AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) {
    AsanChunk *m = 0;
    AsanChunk **fl = &free_lists_[size_class];
    {
      BlockingMutexLock lock(&mu_);
      for (uptr i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        *fl = t->next;
        t->next = m;
        m = t;
      }
    }
    return m;
  }
  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(flags()->quarantine_size > 0);
    BlockingMutexLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > (uptr)flags()->quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }
  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    BlockingMutexLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  AsanChunk *FindChunkByAddr(uptr addr) {
    BlockingMutexLock lock(&mu_);
    return FindChunkByAddrUnlocked(addr);
  }
  uptr AllocationSize(uptr ptr) {
    BlockingMutexLock lock(&mu_);

    // Make sure this is our chunk and |ptr| actually points to the beginning
    // of the allocated memory.
    AsanChunk *m = FindChunkByAddrUnlocked(ptr);
    if (!m || m->Beg() != ptr) return 0;

    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    }
    return 0;
  }
  void PrintStatus() {
    BlockingMutexLock lock(&mu_);
    uptr malloced = 0;

    Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      uptr t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%zu:%zu ", j, t >> 20);
    }
  }
  PageGroup *FindPageGroup(uptr addr) {
    BlockingMutexLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }
  PageGroup *FindPageGroupUnlocked(uptr addr) {
    int n = atomic_load(&n_page_groups_, memory_order_relaxed);
    // If the page groups are not sorted yet, sort them.
    if (n_sorted_page_groups_ < n) {
      SortArray((uptr*)page_groups_, n);
      n_sorted_page_groups_ = n;
    }

    // Binary search over the page groups.
    int beg = 0, end = n;
    while (beg < end) {
      int med = (beg + end) / 2;
      uptr g = (uptr)page_groups_[med];
      if (addr > g) {
        // 'g' points to the end of the group, so 'addr'
        // may not belong to page_groups_[med] or any previous group.
        beg = med + 1;
      } else {
        // 'addr' may belong to page_groups_[med] or a previous group.
        end = med;
      }
    }
    if (beg >= n) return 0;
    PageGroup *g = page_groups_[beg];
    if (g->InRange(addr))
      return g;
    return 0;
  }
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
  AsanChunk *FindChunkByAddrUnlocked(uptr addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uptr offset_from_beg = addr - g->beg;
    uptr this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    sptr offset = 0;
    AsanChunkView m_view(m);
    if (m_view.AddrIsInside(addr, 1, &offset))
      return m;

    if (m_view.AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    }
    CHECK(m_view.AddrIsAtLeft(addr, 1, &offset));
    if (this_chunk_addr == g->beg)  // leftmost chunk
      return m;
    uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
    CHECK(g->InRange(left_chunk_addr));
    return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
  }
  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    uptr size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }
  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(u8 size_class) {
    uptr size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    uptr mmap_size = Max(size, kMinMmapSize);
    uptr n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    uptr PageSize = GetPageSizeCached();
    if (size < PageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at right and poison it.
      mmap_size += PageSize;
    }
    u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    AsanChunk *res = 0;
    for (uptr i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uptr)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
    int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed);
    CHECK(idx < (int)ARRAY_SIZE(page_groups_));
    page_groups_[idx] = pg;
    return res;
  }
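  // Sizing example (illustrative, assuming the non-ASAN_LOW_MEMORY defaults
  // above, i.e. kMinMmapSize == 4M): for size_class 7 the chunk size is 128
  // bytes, so mmap_size == 4M and n_chunks == 32768 before the small-size
  // adjustment; the tail of the mapping also hosts the PageGroup descriptor.
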
 private:
  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  BlockingMutex mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  atomic_uint32_t n_page_groups_;
  int n_sorted_page_groups_;
};

static MallocInfo malloc_info(LINKER_INITIALIZED);
void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}

AsanChunkView FindHeapChunkByAddress(uptr address) {
  return AsanChunkView(malloc_info.FindChunkByAddr(address));
}
static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rounded_size = RoundUpTo(size, REDZONE);
  uptr needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  u8 size_class = SizeToSizeClass(needed_size);
  uptr size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));
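  // Worked example (illustrative, assuming REDZONE == 128): malloc(1) gives
  // rounded_size == 128, needed_size == 256, size_class == 8 and
  // size_to_allocate == 256, i.e. one left redzone plus one REDZONE-sized
  // user block.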
  if (flags()->verbosity >= 3) {
    Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
           alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;
  AsanChunk *m = 0;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // get directly from global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  m->next = 0;
  CHECK(m->Size() == size_to_allocate);
  uptr addr = (uptr)m + REDZONE;
  CHECK(addr <= (uptr)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->used_size = (uptr)p - (uptr)m;
    m->alignment_log = Log2(alignment);
    CHECK(m->Beg() == addr);
  } else {
    m->alignment_log = Log2(REDZONE);
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;

  CHECK(m->Beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid = kInvalidTid;
  StackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                            m->compressed_alloc_stack_size());
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= (uptr)(flags()->max_malloc_fill_size)) {
    REAL(memset)((void*)addr, 0, rounded_size);
  }
  return (u8*)addr;
}
static void Deallocate(u8 *ptr, StackTrace *stack, AllocType alloc_type) {
  if (!ptr) return;

  if (flags()->debug) {
    CHECK(malloc_info.FindPageGroup((uptr)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uptr)ptr);

  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);
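  // For example (illustrative): if two threads race to free the same pointer,
  // exactly one exchange observes CHUNK_ALLOCATED and proceeds; the other
  // observes CHUNK_QUARANTINE and is reported as a double-free below.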
  if (old_chunk_state == CHUNK_QUARANTINE) {
    ReportDoubleFree((uptr)ptr, stack);
  } else if (old_chunk_state != CHUNK_ALLOCATED) {
    ReportFreeNotMalloced((uptr)ptr, stack);
  }
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);
  // With REDZONE==16 m->next is in the user area, otherwise it should be 0.
  CHECK(REDZONE <= 16 || !m->next);
  CHECK(m->free_tid == kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->compressed_free_stack(),
                            m->compressed_free_stack_size());
  uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  CHECK(m->chunk_state == CHUNK_QUARANTINE);

  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}
static u8 *Reallocate(u8 *old_ptr, uptr new_size,
                      StackTrace *stack) {
  CHECK(old_ptr && new_size);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uptr)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->used_size;
  uptr memcpy_size = Min(new_size, old_size);
  u8 *new_ptr = Allocate(0, new_size, stack, FROM_MALLOC);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

}  // namespace __asan
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif

namespace __asan {

void InitializeAllocator() { }

void PrintInternalAllocatorStats() {
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  void *ptr = (void*)Allocate(alignment, size, stack, alloc_type);
  ASAN_MALLOC_HOOK(ptr, size);
  return ptr;
}
702 void asan_free(void *ptr
, StackTrace
*stack
, AllocType alloc_type
) {
704 Deallocate((u8
*)ptr
, stack
, alloc_type
);
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
  ASAN_MALLOC_HOOK(ptr, size);
  return ptr;
}
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
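  // Overflow example (illustrative, 32-bit): nmemb == 0x10000 and
  // size == 0x10001 make nmemb * size wrap to 0x10000, so without this check
  // calloc would return a buffer far smaller than requested.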
  void *ptr = (void*)Allocate(0, nmemb * size, stack, FROM_MALLOC);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  ASAN_MALLOC_HOOK(ptr, size);
  return ptr;
}
, uptr size
, StackTrace
*stack
) {
725 void *ptr
= (void*)Allocate(0, size
, stack
, FROM_MALLOC
);
726 ASAN_MALLOC_HOOK(ptr
, size
);
728 } else if (size
== 0) {
730 Deallocate((u8
*)p
, stack
, FROM_MALLOC
);
733 return Reallocate((u8
*)p
, size
, stack
);
void *asan_valloc(uptr size, StackTrace *stack) {
  void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack, FROM_MALLOC);
  ASAN_MALLOC_HOOK(ptr, size);
  return ptr;
}
void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  void *ptr = (void*)Allocate(PageSize, size, stack, FROM_MALLOC);
  ASAN_MALLOC_HOOK(ptr, size);
  return ptr;
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack, FROM_MALLOC);
  CHECK(IsAligned((uptr)ptr, alignment));
  ASAN_MALLOC_HOOK(ptr, size);
  *memptr = ptr;
  return 0;
}
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  if (ptr == 0) return 0;
  uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  }
  return usable_size;
}
uptr asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uptr)ptr);
}
void asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}
}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
  if (size == 0) return 1;
  return Min(size, kMaxAllowedMallocSize);
}
bool __asan_get_ownership(const void *p) {
  return malloc_info.AllocationSize((uptr)p) > 0;
}
uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr allocated_size = malloc_info.AllocationSize((uptr)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
  }
  return allocated_size;
}

#endif  // ASAN_ALLOCATOR_VERSION