//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
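
// Illustrative note (added for exposition, not from the original source):
// the 3-bit encoding maps rz_log 0..7 to redzone sizes
// 16, 32, 64, 128, 256, 512, 1024, 2048, so for example
// RZSize2Log(128) == 3 and RZLog2Size(3) == 128.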

static AsanAllocator &get_allocator();

static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
                               u32 tid, u32 stack) {
  // Pack the thread id into the high 32 bits and the stack id into the low
  // 32 bits of one atomic word.
  u64 context = tid;
  context <<= 32;
  context += stack;
  atomic_store(atomic_context, context, memory_order_relaxed);
}

static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
                              u32 &tid, u32 &stack) {
  u64 context = atomic_load(atomic_context, memory_order_relaxed);
  stack = context;
  context >>= 32;
  tid = context;
}

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
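//
// Illustrative example (added for exposition): with a 32-byte left redzone
// the ChunkHeader occupies bytes [16, 32) of the block, so alloc_beg !=
// chunk_beg and the first two uptr words hold kAllocBegMagic plus the
// ChunkHeader address; with a 16-byte left redzone the header starts right
// at alloc_beg and no magic/back-pointer pair is needed.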

class ChunkHeader {
 public:
  atomic_uint8_t chunk_state;
  u8 alloc_type : 2;
  u8 lsan_tag : 2;

  // align < 8 -> 0
  // else -> log2(min(align, 512)) - 2
  u8 user_requested_alignment_log : 3;

 private:
  u16 user_requested_size_hi;
  u32 user_requested_size_lo;
  atomic_uint64_t alloc_context_id;

 public:
  uptr UsedSize() const {
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
           user_requested_size_lo;
  }

  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
    CHECK_EQ(UsedSize(), size);
  }

  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};
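
// Illustrative note (added for exposition): on 64-bit targets the user size
// is split across the two fields, e.g. SetUsedSize(0x100000010) stores
// user_requested_size_hi == 0x1 and user_requested_size_lo == 0x10, and
// UsedSize() reassembles (0x1 << 32) + 0x10; on 32-bit targets only the low
// 32 bits are used.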

class ChunkBase : public ChunkHeader {
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

enum {
  // Either just allocated by the underlying allocator, but AsanChunk is not
  // yet ready, or almost returned to the underlying allocator and AsanChunk
  // is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into the quarantine zone.
  CHUNK_QUARANTINE = 3,
};

class AsanChunk : public ChunkBase {
 public:
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
    void *p = get_allocator().GetBlockBegin(m);
    if (p != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
    }

    u8 old_chunk_state = CHUNK_QUARANTINE;
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_INVALID, memory_order_acquire)) {
      CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
    }

    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

 private:
  AllocatorCache * const cache_;
  BufferedStackTrace * const stack_;
};
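
// Note (added for exposition): QuarantineCallback is the glue used by the
// shared Quarantine<> template below -- chunks pushed by QuarantineChunk()
// sit in the quarantine until its byte budget is exceeded, at which point
// Recycle() re-poisons them and returns the blocks to the underlying
// allocator.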

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk or a freed or quarantined AsanChunk.
    // In either case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison the redzones of all existing allocations.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }
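
  // Illustrative example (added for exposition, assuming the default flags
  // redzone=16 and max_redzone=2048): a 100-byte request falls into the
  // "<= 512 - 64" bucket, so rz_log == 2 and the redzone is 64 bytes;
  // requests above roughly 64 KiB get the maximum rz_log of 7 (2048 bytes),
  // subject to the min/max clamping above.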

  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
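
  // Illustrative example (added for exposition): a requested alignment of 64
  // is stored as log value 4 (Log2(64) - 2), and ComputeUserAlignment(4)
  // recovers 64; alignments below 8 are encoded as 0 and alignments above
  // 512 are clamped to 512 (log value 7), which fits the 3-bit
  // user_requested_alignment_log field.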

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over a freed chunk and a freed chunk
    // over an available chunk.
    u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    return true;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc
    // would not return NULL even for zero-size allocations. Moreover, it
    // looks like operator new should never return NULL, and results of
    // consecutive "new" calls must be different even if the allocated size
    // is zero.
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
      needed_size += rz_size;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }
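
    // Worked example (added for exposition, assuming default flags and an
    // 8-byte shadow granularity): for size == 100 and alignment == 8,
    // ComputeRZLog(100) == 2, so rz_size == 64, rounded_size ==
    // RoundUpTo(100, 8) == 104 and needed_size == 168 (one more rz_size is
    // added only when the secondary allocator has to be used).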

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
    }
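
    // Illustrative note (added for exposition): with an 8-byte granularity a
    // 13-byte allocation leaves its last shadow byte set to 13 & 7 == 5,
    // meaning only the first 5 bytes of the final 8-byte granule are
    // addressable; the redzone bytes that follow stay poisoned.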

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }

  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);
    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header, it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }
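
  // Note (added for exposition): the chunk's cost in the quarantine is its
  // user size (m->UsedSize()), so large freed blocks displace many small
  // ones; once the configured quarantine size is exceeded,
  // QuarantineCallback::Recycle() actually releases the memory.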

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    RunFreeHooks(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report a racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator there is no need to
    // clear it, as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // This does not guarantee that the chunk is initialized, but it's
    // definitely not a chunk for any other value.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }
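
  // Note (added for exposition): the lookup above has two paths -- if the
  // block starts with kAllocBegMagic, the stored back-pointer gives the
  // ChunkHeader; otherwise only primary-allocator blocks are assumed to keep
  // the header right at alloc_beg. chunk_state then filters out blocks whose
  // metadata is not (or no longer) valid.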

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk before.
      // Search a bit before to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
                       CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}

uptr AsanChunkView::AllocTid() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return tid;
}

uptr AsanChunkView::FreeTid() const {
  if (!IsQuarantined())
    return kInvalidTid;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return tid;
}

AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

u32 AsanChunkView::GetAllocStackId() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return stack;
}

u32 AsanChunkView::GetFreeStackId() const {
  if (!IsQuarantined())
    return 0;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return stack;
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void *>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0.
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceLock();
}

void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  // FIXME: All use cases provide the chunk address, so
  // GetAsanChunkByAddrFastLocked is not needed.
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

uptr GetUserAddr(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

static const void *AllocationBegin(const void *p) {
  AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
  if (!m)
    return nullptr;
  if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
    return nullptr;
  if (m->UsedSize() == 0)
    return nullptr;
  return (const void *)(m->Beg());
}

// The ASan allocator doesn't reserve extra bytes, so normally we would just
// return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void *addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}
);