//===-- asan_allocator.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
16 #include "asan_allocator.h"
17 #include "asan_mapping.h"
18 #include "asan_poisoning.h"
19 #include "asan_report.h"
20 #include "asan_stack.h"
21 #include "asan_thread.h"
22 #include "sanitizer_common/sanitizer_allocator_checks.h"
23 #include "sanitizer_common/sanitizer_allocator_interface.h"
24 #include "sanitizer_common/sanitizer_errno.h"
25 #include "sanitizer_common/sanitizer_flags.h"
26 #include "sanitizer_common/sanitizer_internal_defs.h"
27 #include "sanitizer_common/sanitizer_list.h"
28 #include "sanitizer_common/sanitizer_stackdepot.h"
29 #include "sanitizer_common/sanitizer_quarantine.h"
30 #include "lsan/lsan_common.h"
namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}
static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
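// For reference: rz_log is an index into the 16 << rz_log table, so the
// values 0..7 correspond to redzone sizes 16, 32, 64, 128, 256, 512, 1024
// and 2048 bytes; RZSize2Log() inverts that mapping, e.g.
// RZSize2Log(64) == 2 and RZLog2Size(2) == 64.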
static AsanAllocator &get_allocator();
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;
struct ChunkHeader {
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;
  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size : 29;
  // align < 8 -> 0
  // else -> log2(min(align, 512)) - 2
  u32 user_requested_alignment_log : 3;
  u32 alloc_context_id;
};
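// The bit-fields above are packed so that ChunkHeader fits in 16 bytes (see
// the COMPILER_CHECK below): 8 + 24 bits for chunk_state and alloc_tid,
// 24 + 1 + 2 + 3 + 2 bits for free_tid and the small flags, 29 + 3 bits for
// the requested size and alignment, plus the 32-bit alloc_context_id.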
struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
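// A chunk's life cycle, as implemented below, is CHUNK_AVAILABLE ->
// CHUNK_ALLOCATED (Allocate) -> CHUNK_QUARANTINE (Deallocate/QuarantineChunk)
// -> CHUNK_AVAILABLE again (QuarantineCallback::Recycle). The state byte is
// the first byte of the header, so it can be flipped with a single
// atomic_store or compare-exchange on the chunk.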
struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};
struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }
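  // User chunks are handed back to the underlying allocator here, in
  // Recycle(); a freed chunk first sits in the quarantine and is deallocated
  // for real only when the quarantine evicts it.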
  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  return &ms->allocator_cache;
}
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
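// SetFrom() and CopyTo() mirror each other field for field, so any new
// allocator option has to be wired through both to survive a round trip
// through the flags.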
struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  atomic_uint8_t rss_limit_exceeded;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;
  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}
  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }
  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }
  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
  }
  bool RssLimitExceeded() {
    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
  }
  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
    uptr beg = ac->Beg();
    uptr end = ac->Beg() + ac->UsedSize(true);
    uptr chunk_end = chunk + allocated_size;
    if (chunk < beg && beg < end && end <= chunk_end &&
        ac->chunk_state == CHUNK_ALLOCATED) {
      // Looks like a valid AsanChunk in use, poison redzones only.
      PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
      FastPoisonShadowPartialRightRedzone(
          end_aligned_down, end - end_aligned_down,
          chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
    } else {
      // This is either not an AsanChunk or freed or quarantined AsanChunk.
      // In either case, poison everything.
      PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
  }
  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocation's redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }
  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }
  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }
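  // With the default redzone flags, for example, a 100-byte request falls
  // into the "<= 512 - 64" bucket of ComputeRZLog, so rz_log is 2 and the
  // redzone is RZLog2Size(2) == 64 bytes; the final Min/Max only changes
  // that when the redzone/max_redzone flags narrow the allowed range.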
  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }
  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
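  // The two helpers above are inverses over the supported range: a requested
  // alignment of 32 is encoded as Log2(32) - 2 == 3, and
  // ComputeUserAlignment(3) == 1 << 5 == 32. Alignments below 8 encode to 0
  // and anything above 512 saturates at 512.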
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (RssLimitExceeded()) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
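    // Illustrative sizing, assuming 8-byte SHADOW_GRANULARITY and default
    // redzone flags: malloc(100) gets rz_log 2 (a 64-byte redzone), so
    // rounded_size = RoundUpTo(Max(100, kChunkHeader2Size), 8) == 104 and
    // needed_size == 104 + 64 == 168 bytes are requested from the underlying
    // allocator, laid out as in the chunk diagram near the top of this file.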
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               (void*)size);
        return nullptr;
      }
      ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize,
                                 stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }
    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }
  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    return true;
  }
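  // If two threads race to free the same chunk, exactly one compare-exchange
  // above succeeds; the loser observes CHUNK_QUARANTINE (or CHUNK_AVAILABLE)
  // in old_chunk_state and reports the invalid/double free instead of pushing
  // the chunk into the quarantine twice.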
  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header, it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }
  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    ASAN_FREE_HOOK(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    QuarantineChunk(m, ptr, stack);
  }
  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }
  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }
  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }
  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }
  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return nullptr;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }
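  // GetAsanChunk() therefore has three cases: secondary-allocator blocks keep
  // the chunk address in their metadata (meta[1]); primary blocks with a left
  // redzone larger than the header start with kAllocBegMagic followed by the
  // chunk address; and otherwise the block begin is the chunk header itself.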
  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }
  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }
  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }
  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }
  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }
  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }
  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};
static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}
bool AsanChunkView::IsValid() const {
  return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}
static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}
u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }

StackTrace AsanChunkView::GetAllocStack() const {
  return GetStackTraceFromId(GetAllocStackId());
}

StackTrace AsanChunkView::GetFreeStack() const {
  return GetStackTraceFromId(GetFreeStackId());
}
void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}
void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}
uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

}  // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}
uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}
bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}
int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}
uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif