//===-- asan_allocator.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
16 #include "asan_allocator.h"
17 #include "asan_mapping.h"
18 #include "asan_poisoning.h"
19 #include "asan_report.h"
20 #include "asan_stack.h"
21 #include "asan_thread.h"
22 #include "sanitizer_common/sanitizer_allocator_interface.h"
23 #include "sanitizer_common/sanitizer_flags.h"
24 #include "sanitizer_common/sanitizer_internal_defs.h"
25 #include "sanitizer_common/sanitizer_list.h"
26 #include "sanitizer_common/sanitizer_stackdepot.h"
27 #include "sanitizer_common/sanitizer_quarantine.h"
28 #include "lsan/lsan_common.h"
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
static AsanAllocator &get_allocator();
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;
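
// Illustration: with a 64-byte left redzone the ChunkHeader occupies only the
// last 16 bytes of it, so alloc_beg != chunk_beg and Allocate() writes the
// (kAllocBegMagic, chunk_beg) pair at alloc_beg; only when the left redzone
// is exactly 16 bytes and the request was not over-aligned do alloc_beg and
// chunk_beg coincide, and the magic words are not needed.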
struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};
struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
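
// The 16-byte ChunkHeader always fits in the minimal 16-byte left redzone and
// sits immediately before the user region (AsanChunk::Beg() below is just
// "this + kChunkHeaderSize"); the Header2 part (free_context_id) overlaps
// user memory and is only meaningful after the chunk has been freed.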
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
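
// Lifecycle, as implemented below: Allocate() publishes CHUNK_ALLOCATED as
// its last metadata store, Deallocate() flips ALLOCATED -> QUARANTINE with an
// atomic compare-exchange (which is how double frees are caught), and
// Recycle() resets the chunk to CHUNK_AVAILABLE right before returning the
// block to the underlying allocator.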
struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};
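
// Note: for chunks that did not come from memalign, user memory starts
// exactly rz_size bytes after the block returned by the underlying allocator,
// so AllocBeg() can recompute the block start as Beg() - RZLog2Size(rz_log);
// memaligned chunks have a variable gap and must ask the allocator instead.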
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return get_allocator().Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  return &ms->allocator_cache;
}
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
}
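
// SetFrom() and CopyTo() are field-by-field mirrors: SetFrom() builds
// AllocatorOptions from the user-visible Flags/CommonFlags, while CopyTo()
// writes the currently active values back into the flag structs, so callers
// can round-trip the live allocator configuration.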
struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
  static const uptr kMaxThreadLocalQuarantine =
      FIRST_32_SECOND_64(1 << 18, 1 << 20);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;
  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;
  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}
  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }
  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    kMaxThreadLocalQuarantine);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }
  void Initialize(const AllocatorOptions &options) {
    allocator.Init(options.may_return_null);
    SharedInitCode(options);
  }
  void ReInitialize(const AllocatorOptions &options) {
    allocator.SetMayReturnNull(options.may_return_null);
    SharedInitCode(options);
  }
  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = allocator.MayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
  }
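
  // min_redzone, max_redzone and alloc_dealloc_mismatch are atomics because
  // ReInitialize() may change them while other threads keep allocating; the
  // release stores in SharedInitCode() pair with the acquire loads here and
  // in ComputeRZLog()/QuarantineChunk() below.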
  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }
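
  // Worked example: for a 100-byte request, 100 > 128 - 32 but
  // 100 <= 512 - 64, so rz_log = 2, i.e. 64-byte redzones; with the default
  // redzone=16 and max_redzone=2048 settings the clamp leaves it at 2.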
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    Flags &fl = *flags();
    const uptr min_alignment = SHADOW_GRANULARITY;
    if (alignment < min_alignment)
      alignment = min_alignment;
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc
    // would not return NULL even for zero-size allocations. Moreover, it
    // looks like operator new should never return NULL, and results of
    // consecutive "new" calls must be different even if the allocated size
    // is zero.
    if (size == 0)
      size = 1;
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
             (void *)size);
      return allocator.ReturnNullOrDie();
    }
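
    // For instance (assuming SHADOW_GRANULARITY == 8), malloc(100) gets
    // rz_log = 2 and rz_size = 64, rounded_size = RoundUpTo(100, 8) = 104 and
    // needed_size = 168; alignment == min_alignment, so no extra slack is
    // reserved, and the primary allocator's own rounding supplies the right
    // redzone implicitly.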
    AsanThread *t = GetCurrentThread();
    void *allocated;
    bool check_rss_limit = true;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated =
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated =
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    }

    if (!allocated)
      return allocator.ReturnNullOrDie();

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
    if (using_primary_allocator) {
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }
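
    // For large (secondary) allocations the header stores the sentinel
    // SizeClassMap::kMaxSize and the real size lives in the allocator
    // metadata: meta[0] is what UsedSize() reads back, and meta[1] is how
    // GetAsanChunk() finds the header from the block begin.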
    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }
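
    // E.g. with SHADOW_GRANULARITY == 8 and size == 13, the first 8 bytes are
    // unpoisoned wholesale and the shadow byte of the last granule is set to
    // 13 & 7 == 5, i.e. only the first 5 bytes of that granule are
    // addressable (or 0, fully addressable, if poison_partial is off).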
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }
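
  // The release store of CHUNK_ALLOCATED above is deliberately the last write
  // to the chunk metadata: a thread that later observes the state through the
  // acquire compare-exchange in AtomicallySetQuarantineFlag() is then
  // guaranteed to also see the fully initialized header.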
  void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
                                   BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                        CHUNK_QUARANTINE, memory_order_acquire))
      ReportInvalidFree(ptr, old_chunk_state, stack);
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
  }
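
  // If the chunk was already quarantined or was never allocated, the CAS
  // fails and ReportInvalidFree() diagnoses a double-free or wild free; that
  // report is expected to be fatal, so the CHECK_EQ afterwards is only a
  // backstop in case error reporting does not abort.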
  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlag.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
                       AllocType alloc_type) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    }

    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);
    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
                     m->UsedSize());
    }
  }
  void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
                  AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    if (delete_size && flags()->new_delete_type_mismatch &&
        delete_size != m->UsedSize()) {
      ReportNewDeleteSizeMismatch(p, delete_size, stack);
    }
    ASAN_FREE_HOOK(ptr);
    // Must mark the chunk as quarantined before any changes to its metadata.
    AtomicallySetQuarantineFlag(m, ptr, stack);
    QuarantineChunk(m, ptr, stack, alloc_type);
  }
  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }
  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (CallocShouldReturnNullDueToOverflow(size, nmemb))
      return allocator.ReturnNullOrDie();
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }
  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }
  void CommitBack(AsanThreadLocalMallocStorage *ms) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
    allocator.SwallowCache(ac);
  }
  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return nullptr;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }
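
  // Three cases, mirroring what Allocate() wrote: secondary blocks keep the
  // chunk address in meta[1]; primary blocks whose left redzone is larger
  // than the header (or which were memaligned) are located via the
  // kAllocBegMagic pair at the block start; otherwise the ChunkHeader starts
  // right at alloc_beg.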
  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }
  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }
  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }
  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }
  void PrintStats() {
    allocator.PrintStats();
  }
  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }
  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};
static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}
bool AsanChunkView::IsValid() {
  return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}
StackTrace AsanChunkView::GetAllocStack() {
  return GetStackTraceFromId(chunk_->alloc_context_id);
}
StackTrace AsanChunkView::GetFreeStack() {
  return GetStackTraceFromId(chunk_->free_context_id);
}
void InitializeAllocator(const AllocatorOptions &options) {
  instance.Initialize(options);
}
void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}
void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}
AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
void AsanThreadLocalMallocStorage::CommitBack() {
  instance.CommitBack(this);
}
void PrintInternalAllocatorStats() {
  instance.PrintStats();
}
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  return instance.Allocate(size, alignment, stack, alloc_type, true);
}
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, stack, alloc_type);
}
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
  instance.Deallocate(ptr, size, stack, alloc_type);
}
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
}
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return instance.Calloc(nmemb, size, stack);
}
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    instance.Deallocate(p, 0, stack, FROM_MALLOC);
    return nullptr;
  }
  return instance.Reallocate(p, size, stack);
}
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}
uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}
void asan_mz_force_lock() {
  instance.ForceLock();
}
void asan_mz_force_unlock() {
  instance.ForceUnlock();
}
void AsanSoftRssLimitExceededCallback(bool exceeded) {
  instance.allocator.SetRssLimitIsExceeded(exceeded);
}

}  // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}
void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}
uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}
bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}
ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}
void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}
uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}
u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}

}  // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}
int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}
uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif