//===-- asan_allocator.cc ---------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned by REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to a quarantine (FIFO list).
// After quarantine, a chunk is returned to the freelists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
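//
// Illustrative chunk layout (editorial sketch, not from the original text;
// REDZONE is the run-time value of FLAG_redzone):
//
//   | ChunkBase + alloc stack | user memory ...     | right redzone |
//   ^-- AsanChunk* (left redzone, REDZONE bytes)
//       user memory starts at beg() == (uintptr_t)chunk + offset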
//===----------------------------------------------------------------------===//
28 #include "asan_allocator.h"
29 #include "asan_interceptors.h"
30 #include "asan_interface.h"
31 #include "asan_internal.h"
32 #include "asan_lock.h"
33 #include "asan_mapping.h"
34 #include "asan_stats.h"
35 #include "asan_thread.h"
36 #include "asan_thread_registry.h"
#define REDZONE FLAG_redzone
static const size_t kMinAllocSize = REDZONE * 2;
static const uint64_t kMaxAvailableRam = 128ULL << 30;  // 128G
static const size_t kMaxThreadLocalQuarantine = 1 << 20;  // 1M

#if ASAN_LOW_MEMORY == 1
static const size_t kMinMmapSize = 4UL << 17;  // 512K
static const size_t kMaxSizeForThreadLocalFreeList = 1 << 15;  // 32K
#else
static const size_t kMinMmapSize = 4UL << 20;  // 4M
static const size_t kMaxSizeForThreadLocalFreeList = 1 << 17;  // 128K
#endif
// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const size_t kMallocSizeClassStepLog = 26;
static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

#if __WORDSIZE == 32
static const size_t kMaxAllowedMallocSize = 3UL << 30;  // 3G
#else
static const size_t kMaxAllowedMallocSize = 8UL << 30;  // 8G
#endif
static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
  return (a & (alignment - 1)) == 0;
}
static inline size_t Log2(size_t x) {
  CHECK(IsPowerOfTwo(x));
#if defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#elif defined(_WIN32)
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#else
  return __builtin_ctzl(x);
#endif
}
static inline size_t RoundUpToPowerOfTwo(size_t size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  unsigned long up;  // NOLINT
#if defined(_WIN64)
  _BitScanReverse64(&up, size);
#elif defined(_WIN32)
  _BitScanReverse(&up, size);
#else
  up = __WORDSIZE - 1 - __builtin_clzl(size);
#endif
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}
static inline size_t SizeClassToSize(uint8_t size_class) {
  CHECK(size_class < kNumberOfSizeClasses);
  if (size_class <= kMallocSizeClassStepLog) {
    return 1UL << size_class;
  } else {
    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
  }
}
static inline uint8_t SizeToSizeClass(size_t size) {
  uint8_t res = 0;
  if (size <= kMallocSizeClassStep) {
    size_t rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
          + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}
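// Worked examples of the size-class mapping above (editorial, with
// kMallocSizeClassStepLog == 26, i.e. a 64M step):
//   SizeToSizeClass(100)       == 7   (rounds up to 128 == 1 << 7)
//   SizeClassToSize(7)         == 128
//   SizeToSizeClass(100 << 20) == 28  (ceil(100M / 64M) == 2, plus 26)
//   SizeClassToSize(28)        == 2 * kMallocSizeClassStep == 128M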
// Given REDZONE bytes, we need to mark first size bytes
// as addressable and the rest REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}
static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
  CHECK(IsAligned(size, kPageSize));
  uint8_t *res = (uint8_t*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
  PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
  Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  return res;
}
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case 'next' contains the address
// of the AsanChunk).
//
// The magic numbers for the enum values are taken randomly.
enum {
  CHUNK_AVAILABLE  = 0x573B,
  CHUNK_ALLOCATED  = 0x3204,
  CHUNK_QUARANTINE = 0x1978,
  CHUNK_MEMALIGN   = 0xDC68,
};
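// Editorial note on the lifecycle implied by the code below: Allocate() moves
// a chunk from CHUNK_AVAILABLE to CHUNK_ALLOCATED, Deallocate() moves it to
// CHUNK_QUARANTINE, and QuarantinePop() eventually returns it to
// CHUNK_AVAILABLE by putting it back on a free list.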
struct ChunkBase {
  uint16_t   chunk_state;
  uint8_t    size_class;
  uint32_t   offset;  // User-visible memory starts at this+offset (beg()).
  int32_t    alloc_tid;
  int32_t    free_tid;
  size_t     used_size;  // Size requested by the user.
  AsanChunk *next;

  uintptr_t beg() { return (uintptr_t)this + offset; }
  size_t Size() { return SizeClassToSize(size_class); }
  uint8_t SizeClass() { return size_class; }
};
struct AsanChunk: public ChunkBase {
  uint32_t *compressed_alloc_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
  }
  uint32_t *compressed_free_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + REDZONE);
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  size_t compressed_alloc_stack_size() {
    return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
  }
  size_t compressed_free_stack_size() {
    return REDZONE / sizeof(uint32_t);
  }
  bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
      *offset = addr - beg();
      return true;
    }
    return false;
  }

  bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr < beg()) {
      *offset = beg() - addr;
      return true;
    }
    return false;
  }

  bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr + access_size >= beg() + used_size) {
      if (addr <= beg() + used_size)
        *offset = 0;
      else
        *offset = addr - (beg() + used_size);
      return true;
    }
    return false;
  }
  void DescribeAddress(uintptr_t addr, size_t access_size) {
    size_t offset;
    Printf("%p is located ", addr);
    if (AddrIsInside(addr, access_size, &offset)) {
      Printf("%ld bytes inside of", offset);
    } else if (AddrIsAtLeft(addr, access_size, &offset)) {
      Printf("%ld bytes to the left of", offset);
    } else if (AddrIsAtRight(addr, access_size, &offset)) {
      Printf("%ld bytes to the right of", offset);
    } else {
      Printf(" somewhere around (this is an AddressSanitizer bug!)");
    }
    Printf(" %lu-byte region [%p,%p)\n",
           used_size, beg(), beg() + used_size);
  }
};
static AsanChunk *PtrToChunk(uintptr_t ptr) {
  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
  if (m->chunk_state == CHUNK_MEMALIGN) {
    m = (AsanChunk*)m->next;
  }
  return m;
}
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  if (last_) {
    CHECK(first_);
    CHECK(last_->next == NULL);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    CHECK(!first_);
    last_ = q->last_;
    first_ = q->first_;
  }
  size_ += q->size();
  q->clear();
}
void AsanChunkFifoList::Push(AsanChunk *n) {
  CHECK(n->next == NULL);
  if (last_) {
    CHECK(first_);
    CHECK(last_->next == NULL);
    last_->next = n;
    last_ = n;
  } else {
    CHECK(!first_);
    last_ = first_ = n;
  }
  size_ += n->Size();
}
// Interesting performance observation: this function takes up to 15% of overall
// allocator time. That's because *first_ has been evicted from cache long time
// ago. Not sure if we can or want to do anything with this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == NULL)
    last_ = NULL;
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  return res;
}
// All pages we ever allocated.
struct PageGroup {
  uintptr_t beg;
  uintptr_t end;
  size_t size_of_chunk;
  uintptr_t last_chunk;
  bool InRange(uintptr_t addr) {
    return addr >= beg && addr < end;
  }
};
class MallocInfo {
 public:
  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
    AsanChunk *m = NULL;
    AsanChunk **fl = &free_lists_[size_class];
    ScopedLock lock(&mu_);
    for (size_t i = 0; i < n_chunks; i++) {
      if (!(*fl)) {
        *fl = GetNewChunks(size_class);
      }
      AsanChunk *t = *fl;
      *fl = t->next;
      t->next = m;
      CHECK(t->chunk_state == CHUNK_AVAILABLE);
      m = t;
    }
    return m;
  }
  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(FLAG_quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > FLAG_quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }
  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }
  AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
    ScopedLock lock(&mu_);
    return FindChunkByAddr(addr);
  }
  // TODO(glider): AllocationSize() may become very slow if the size of
  // page_groups_ grows. This can be fixed by increasing kMinMmapSize,
  // but a better solution is to speed up the search somehow.
  size_t AllocationSize(uintptr_t ptr) {
    if (!ptr) return 0;
    ScopedLock lock(&mu_);

    // First, check that this is our memory.
    PageGroup *g = FindPageGroupUnlocked(ptr);
    if (!g) return 0;
    AsanChunk *m = PtrToChunk(ptr);
    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    }
    return 0;
  }
  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  void PrintStatus() {
    ScopedLock lock(&mu_);
    size_t malloced = 0;

    Printf(" MallocInfo: in quarantine: %ld malloced: %ld; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      size_t t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%ld:%ld ", j, t >> 20);
    }
    Printf("\n");
  }
  PageGroup *FindPageGroup(uintptr_t addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
  PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
    for (int i = 0; i < n_page_groups_; i++) {
      PageGroup *g = page_groups_[i];
      if (g->InRange(addr)) {
        return g;
      }
    }
    return NULL;
  }
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uintptr_t addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    size_t l_offset = 0, r_offset = 0;
    CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
    CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
  AsanChunk *FindChunkByAddr(uintptr_t addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uintptr_t offset_from_beg = addr - g->beg;
    uintptr_t this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    size_t offset = 0;
    if (m->AddrIsInside(addr, 1, &offset))
      return m;

    if (m->AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    }

    CHECK(m->AddrIsAtLeft(addr, 1, &offset));
    if (this_chunk_addr == g->beg)  // leftmost chunk
      return m;
    uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
    CHECK(g->InRange(left_chunk_addr));
    return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
  }
  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    size_t size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }
  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(uint8_t size_class) {
    size_t size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    size_t mmap_size = Max(size, kMinMmapSize);
    size_t n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    if (size < kPageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at right and poison it.
      mmap_size += kPageSize;
    }
    CHECK(n_chunks > 0);
    uint8_t *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    AsanChunk *res = NULL;
    for (size_t i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uintptr_t)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
    int page_group_idx = AtomicInc(&n_page_groups_) - 1;
    CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
    page_groups_[page_group_idx] = pg;
    return res;
  }
  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  int n_page_groups_;  // atomic
};

static MallocInfo malloc_info(LINKER_INITIALIZED);
void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}
static void Describe(uintptr_t addr, size_t access_size) {
  AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
  if (!m) return;
  m->DescribeAddress(addr, access_size);
  CHECK(m->alloc_tid >= 0);
  AsanThreadSummary *alloc_thread =
      asanThreadRegistry().FindByTid(m->alloc_tid);
  AsanStackTrace alloc_stack;
  AsanStackTrace::UncompressStack(&alloc_stack, m->compressed_alloc_stack(),
                                  m->compressed_alloc_stack_size());
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  if (m->free_tid >= 0) {
    AsanThreadSummary *free_thread =
        asanThreadRegistry().FindByTid(m->free_tid);
    Printf("freed by thread T%d here:\n", free_thread->tid());
    AsanStackTrace free_stack;
    AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
                                    m->compressed_free_stack_size());
    free_stack.PrintStack();
    Printf("previously allocated by thread T%d here:\n",
           alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    free_thread->Announce();
    alloc_thread->Announce();
  } else {
    Printf("allocated by thread T%d here:\n", alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    alloc_thread->Announce();
  }
}
static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  size_t rounded_size = RoundUpTo(size, REDZONE);
  size_t needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", size);
    return 0;
  }

  uint8_t size_class = SizeToSizeClass(needed_size);
  size_t size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (FLAG_v >= 3) {
    Printf("Allocate align: %ld size: %ld class: %d real: %ld\n",
           alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics.
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = NULL;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // get directly from global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = NULL;
  CHECK(m->Size() == size_to_allocate);
  uintptr_t addr = (uintptr_t)m + REDZONE;
  CHECK(addr == (uintptr_t)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->next = m;
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  m->offset = addr - (uintptr_t)m;
  CHECK(m->beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid = AsanThread::kInvalidTid;
  AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                                m->compressed_alloc_stack_size());
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= FLAG_max_malloc_fill_size) {
    REAL(memset)((void*)addr, 0, rounded_size);
  }
  return (uint8_t*)addr;
}
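// Editorial example for Allocate(), assuming the default REDZONE of 128:
// malloc(100) gives rounded_size == 128 and needed_size == 256, which maps to
// size class 8 (256 bytes). The user gets 100 addressable bytes at
// m + REDZONE; the remaining bytes stay poisoned as redzones.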
static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (FLAG_debug) {
    CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uintptr_t)ptr);
  if (m->chunk_state == CHUNK_QUARANTINE) {
    Report("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
    stack->PrintStack();
    Describe((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  } else if (m->chunk_state != CHUNK_ALLOCATED) {
    Report("ERROR: AddressSanitizer attempting free on address which was not"
           " malloc()-ed: %p\n", ptr);
    stack->PrintStack();
    ShowStatsAndAbort();
  }
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  CHECK(m->free_tid == AsanThread::kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
                                m->compressed_free_stack_size());
  size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  m->chunk_state = CHUNK_QUARANTINE;
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}
static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
                           AsanStackTrace *stack) {
  CHECK(old_ptr && new_size);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  size_t old_size = m->used_size;
  size_t memcpy_size = Min(new_size, old_size);
  uint8_t *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != NULL);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}
}  // namespace __asan
// Malloc hooks declaration.
// ASAN_NEW_HOOK(ptr, size) is called immediately after
//   allocation of "size" bytes, which returned "ptr".
// ASAN_DELETE_HOOK(ptr) is called immediately before
//   deallocation of "ptr".
// If ASAN_NEW_HOOK or ASAN_DELETE_HOOK is defined, the user
// program must provide an implementation of that hook.
// If the macro is undefined, the hook is a no-op.
#ifdef ASAN_NEW_HOOK
extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
#else
static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
#endif

#ifdef ASAN_DELETE_HOOK
extern "C" void ASAN_DELETE_HOOK(void *ptr);
#else
static inline void ASAN_DELETE_HOOK(void *ptr) { }
#endif
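// Editorial illustration (hypothetical user code, not part of this file):
// a program built with -DASAN_NEW_HOOK could log every allocation like so:
//
//   #include <stdio.h>
//   extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size) {
//     // Called right after the allocator returns `ptr` for `size` bytes.
//     fprintf(stderr, "asan new: %p (%zu bytes)\n", ptr, size);
//   }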
namespace __asan {

void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(alignment, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}
void asan_free(void *ptr, AsanStackTrace *stack) {
  ASAN_DELETE_HOOK(ptr);
  Deallocate((uint8_t*)ptr, stack);
}
void *asan_malloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}
void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, nmemb * size, stack);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  ASAN_NEW_HOOK(ptr, nmemb * size);
  return ptr;
}
void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
  if (p == NULL) {
    void *ptr = (void*)Allocate(0, size, stack);
    ASAN_NEW_HOOK(ptr, size);
    return ptr;
  } else if (size == 0) {
    ASAN_DELETE_HOOK(p);
    Deallocate((uint8_t*)p, stack);
    return NULL;
  }
  return Reallocate((uint8_t*)p, size, stack);
}
void *asan_valloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}
void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
  size = RoundUpTo(size, kPageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = kPageSize;
  }
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}
int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
                        AsanStackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack);
  CHECK(IsAligned((uintptr_t)ptr, alignment));
  ASAN_NEW_HOOK(ptr, size);
  *memptr = ptr;
  return 0;
}
size_t asan_malloc_usable_size(void *ptr, AsanStackTrace *stack) {
  CHECK(stack);
  if (ptr == NULL) return 0;
  size_t usable_size = malloc_info.AllocationSize((uintptr_t)ptr);
  if (usable_size == 0) {
    Report("ERROR: AddressSanitizer attempting to call malloc_usable_size() "
           "for pointer which is not owned: %p\n", ptr);
    stack->PrintStack();
    Describe((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  }
  return usable_size;
}
size_t asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uintptr_t)ptr);
}
void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
  Describe(addr, access_size);
}
void asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}
// ---------------------- Fake stack-------------------- {{{1
FakeStack::FakeStack() {
  CHECK(REAL(memset) != NULL);
  REAL(memset)(this, 0, sizeof(*this));
}
bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
  uintptr_t mem = allocated_size_classes_[size_class];
  uintptr_t size = ClassMmapSize(size_class);
  bool res = mem && addr >= mem && addr < mem + size;
  return res;
}
uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
  }
  return 0;
}
// We may want to compute this during compilation.
inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
  size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
  size_t log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  if (!(alloc_size > (1UL << (log - 1)))) {
    Printf("alloc_size %ld log %ld\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log - 1)));
  size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}
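// Editorial example: a 1000-byte frame rounds up to 1024 == 1 << 10, so it
// falls into class (10 - kMinStackFrameSizeLog), or class 0 if 10 is below
// the minimum; either way ClassSize() of that class is at least 1024.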
void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }
}
FakeFrame *FakeFrameFifo::FifoPop() {
  CHECK(first_ && last_ && "Exhausted fake stack");
  FakeFrame *res = 0;
  if (first_ == last_) {
    res = first_;
    first_ = last_ = 0;
  } else {
    res = first_;
    first_ = first_->next;
  }
  return res;
}
void FakeStack::Init(size_t stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}
void FakeStack::Cleanup() {
  alive_ = false;
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    uintptr_t mem = allocated_size_classes_[i];
    if (!mem) continue;
    PoisonShadow(mem, ClassMmapSize(i), 0);
    allocated_size_classes_[i] = 0;
    AsanUnmapOrDie((void*)mem, ClassMmapSize(i));
  }
}
size_t FakeStack::ClassMmapSize(size_t size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}
void FakeStack::AllocateOneSizeClass(size_t size_class) {
  CHECK(ClassMmapSize(size_class) >= kPageSize);
  uintptr_t new_mem = (uintptr_t)AsanMmapSomewhereOrDie(
      ClassMmapSize(size_class), __FUNCTION__);
  // Printf("T%d new_mem[%ld]: %p-%p mmap %ld\n",
  //        asanThreadRegistry().GetCurrent()->tid(),
  //        size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //        ClassMmapSize(size_class));
  size_t i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}
uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  size_t size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uintptr_t ptr = (uintptr_t)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}
void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  size_t size = fake_frame->size_minus_one + 1;
  size_t size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uintptr_t ptr = (uintptr_t)fake_frame;
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}
void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
  FakeFrame *fake_frame = (FakeFrame*)ptr;
  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
  CHECK(fake_frame->descr != 0);
  CHECK(fake_frame->size_minus_one == size - 1);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}
}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT
size_t __asan_stack_malloc(size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %ld %p\n", ptr, size, real_stack);
  return ptr;
}
void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return;
  if (ptr != real_stack) {
    FakeStack::OnFree(ptr, size, real_stack);
  }
}
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
size_t __asan_get_estimated_allocated_size(size_t size) {
  if (size == 0) return 1;
  return Min(size, kMaxAllowedMallocSize);
}
bool __asan_get_ownership(const void *p) {
  return malloc_info.AllocationSize((uintptr_t)p) > 0;
}
size_t __asan_get_allocated_size(const void *p) {
  if (p == NULL) return 0;
  size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    Report("ERROR: AddressSanitizer attempting to call "
           "__asan_get_allocated_size() for pointer which is "
           "not owned: %p\n", p);
    PRINT_CURRENT_STACK();
    Describe((uintptr_t)p, 1);
    ShowStatsAndAbort();
  }
  return allocated_size;
}