//===-- asan_allocator.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
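
// For example, rz_size == 32 encodes as rz_log == Log2(32) - 4 == 1, and
// decodes back via RZLog2Size(1) == 16 << 1 == 32; the eight encodable
// values 0..7 cover redzones from 16 up to 2048 bytes.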

static AsanAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};
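
// Note on AllocBeg(): for ordinary allocations the start of the underlying
// block is recovered from Beg() minus the left redzone size (rz_log), but for
// memalign'd chunks the header may sit far from the block start, so the
// underlying allocator has to be asked for the block beginning instead.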

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      return DieOnFailure::OnOOM();
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
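
// The quarantine delays reuse of freed chunks: QuarantineCallback::Recycle()
// runs only when a chunk is evicted from the quarantine, which keeps freed
// memory poisoned long enough for use-after-free accesses to be reported.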

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  atomic_uint8_t rss_limit_exceeded;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }
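
  // The release stores above pair with the acquire loads in GetOptions() and
  // ComputeRZLog(), so threads reading the options observe the updated
  // redzone bounds consistently.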

  void Initialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.Init(options.release_to_os_interval_ms);
    SharedInitCode(options);
  }

  bool RssLimitExceeded() {
    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
    uptr beg = ac->Beg();
    uptr end = ac->Beg() + ac->UsedSize(true);
    uptr chunk_end = chunk + allocated_size;
    if (chunk < beg && beg < end && end <= chunk_end &&
        ac->chunk_state == CHUNK_ALLOCATED) {
      // Looks like a valid AsanChunk in use, poison redzones only.
      PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
      FastPoisonShadowPartialRightRedzone(
          end_aligned_down, end - end_aligned_down,
          chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
    } else {
      // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
      // In either case, poison everything.
      PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocations' redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }
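
  // Worked example: a 1000-byte request falls into the "<= 4096 - 128"
  // bucket, so rz_log == 3 and the redzone is RZLog2Size(3) == 128 bytes,
  // unless the flags clamp it into the [min_rz, max_rz] range.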

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over a freed chunk, and a freed chunk
    // over an available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (RssLimitExceeded())
      return AsanAllocator::FailureHandler::OnOOM();
    Flags &fl = *flags();
    const uptr min_alignment = SHADOW_GRANULARITY;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
             (void*)size);
      return AsanAllocator::FailureHandler::OnBadRequest();
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (!allocated)
      return nullptr;

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
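
    // Layout recap (see the diagram above kAllocBegMagic): for an ordinary
    // 8-byte-aligned request, user_beg == alloc_beg + rz_size and the 16-byte
    // ChunkHeader occupies the tail of the left redzone; the magic word and
    // back pointer at alloc_beg are written only when the header does not sit
    // at the very start of the underlying block.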

    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }

    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }

  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    return true;
  }
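
  // If two threads race to free the same pointer, only one compare-exchange
  // can move the state from CHUNK_ALLOCATED to CHUNK_QUARANTINE; the loser
  // sees CHUNK_QUARANTINE in old_chunk_state and reports a double-free.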

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header, it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
                     m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
                  AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    }

    if (delete_size && flags()->new_delete_type_mismatch &&
        delete_size != m->UsedSize()) {
      ReportNewDeleteSizeMismatch(p, delete_size, stack);
    }

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (CheckForCallocOverflow(size, nmemb))
      return AsanAllocator::FailureHandler::OnBadRequest();
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return nullptr;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }
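
  // Three lookup cases, mirroring how Allocate() records the header location:
  // secondary-allocator blocks keep the chunk address in their metadata,
  // primary blocks with a large left redzone carry the kAllocBegMagic word
  // plus a back pointer, and otherwise the header sits right at alloc_beg.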

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};
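
// Note: ForceLock() takes the allocator lock before fallback_mutex and
// ForceUnlock() releases them in the reverse order, so callers such as
// asan_mz_force_lock()/asan_mz_force_unlock() keep a consistent lock order.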

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }

StackTrace AsanChunkView::GetAllocStack() const {
  return GetStackTraceFromId(GetAllocStackId());
}

StackTrace AsanChunkView::GetFreeStack() const {
  return GetStackTraceFromId(GetFreeStackId());
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.Initialize(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  instance.CommitBack(this);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, stack, alloc_type);
}

void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
  instance.Deallocate(ptr, size, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on realloc to 0.
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    return AsanAllocator::FailureHandler::OnBadRequest();
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    return AsanAllocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    AsanAllocator::FailureHandler::OnBadRequest();
    return errno_EINVAL;
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
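
// Unlike the malloc-style entry points above, posix_memalign() reports
// failure through its return value (errno_EINVAL / errno_ENOMEM) rather than
// by setting errno, which is presumably why SetErrnoOnNull() is not used here.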

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif