//===-- asan_poisoning.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//
#include "asan_poisoning.h"

#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"

namespace __asan {

static atomic_uint8_t can_poison_memory;
void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}
void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (value && !CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
  FastPoisonShadow(addr, size, value);
}
void PoisonShadowPartialRightRedzone(uptr addr, uptr size, uptr redzone_size,
                                     u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}
struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, ASAN_SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8 *)MemToShadow(address);
    offset = address & (ASAN_SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};
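
// Illustrative only: with the default 8-byte granularity, address 0x1003
// maps to the shadow chunk covering [0x1000, 0x1008) with offset == 3; the
// shadow value encodes how many leading bytes of that granule are
// addressable (0 means all eight, k in [1, 7] means the first k bytes, and a
// negative value means none).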
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (Verbosity()) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", (void *)ptr, (void *)end, size);
    if (Verbosity() >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);
  CHECK_LE(size, 4096);
  CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
  if (!IsAligned(ptr, ASAN_SHADOW_GRANULARITY)) {
    *(u8 *)MemToShadow(ptr) =
        poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
    ptr |= ASAN_SHADOW_GRANULARITY - 1;
    ptr++;
  }
  for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
    *(u8 *)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}

}  // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
// The current implementation of __asan_(un)poison_memory_region doesn't check
// that the user program (un)poisons memory it owns. It poisons memory
// conservatively, and unpoisons progressively, to make sure the ASan shadow
// mapping invariant is preserved (see the detailed mapping description at
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * if the user asks to poison the region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * if the user asks to unpoison the region [left, right), the program
//   unpoisons at most [AlignDown(left), right).
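//
// Example (illustrative, 8-byte granularity): poisoning [0x1003, 0x1016)
// poisons at least [0x1003, 0x1010), while unpoisoning the same region
// unpoisons at most [0x1000, 0x1016).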
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}
void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if it is not unpoisoned
    // already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    *beg.chunk = 0;
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}
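
// Illustrative only (user-side code, not part of this RTL): the two entry
// points above are declared in <sanitizer/asan_interface.h> and are commonly
// used by custom allocators to manage their own redzones, e.g.
//   char arena[256];
//   __asan_poison_memory_region(arena, sizeof(arena));  // whole arena traps
//   __asan_unpoison_memory_region(arena, 64);  // hand out the first 64 bytes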
int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}
uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size)
    return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg))
    return beg;
  if (!AddrIsInMem(end))
    return end;
  CHECK_LT(beg, end);
  uptr aligned_b = RoundUpTo(beg, ASAN_SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
}
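
// Illustrative only: callers typically act on the address of the first
// poisoned byte returned above, e.g.
//   if (uptr bad = __asan_region_is_poisoned((uptr)p, n))
//     Report("first poisoned byte at %p\n", (void *)bad);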
#define CHECK_SMALL_REGION(p, size, isWrite)                      \
  do {                                                            \
    uptr __p = reinterpret_cast<uptr>(p);                         \
    uptr __size = size;                                           \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||                \
                 __asan::AddressIsPoisoned(__p + __size - 1))) {  \
      GET_CURRENT_PC_BP_SP;                                       \
      uptr __bad = __asan_region_is_poisoned(__p, __size);        \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \
    }                                                             \
  } while (false)
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}
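
// Illustrative only: uu16/uu32/uu64 are the sanitizer runtime's
// unaligned integer typedefs, so these helpers perform shadow-checked
// unaligned accesses, e.g. reading a 32-bit length field at an odd offset
// inside a packed buffer:
//   u32 len = __sanitizer_unaligned_load32((const uu32 *)(buf + 1));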
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8 *>(s) = kAsanArrayCookieMagic;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8 *>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are going to get double-free. So, return 0 to avoid
  // infinite loop of destructors. We don't want to report a double-free here
  // though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from free-d memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}
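
// Illustrative only: the "array cookie" is the element count the C++ ABI
// stores just before arrays allocated with new[] when the elements have
// non-trivial destructors; delete[] reads it back to know how many
// destructors to run, e.g.
//   struct S { ~S(); };
//   S *a = new S[10];  // the cookie (10) sits immediately before a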
// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8 *shadow_end = (s8 *)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    *shadow_end = Max(end_value, end_offset);
  }
}
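
// Illustrative only, 8-byte granularity: for size == 13, aligned_size == 8,
// so [addr, addr + 8) is handled by PoisonShadow above, while the trailing
// five bytes share a granule whose single shadow byte is patched here.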
void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_01(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x01, size);
}

void __asan_set_shadow_02(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x02, size);
}

void __asan_set_shadow_03(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x03, size);
}

void __asan_set_shadow_04(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x04, size);
}

void __asan_set_shadow_05(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x05, size);
}

void __asan_set_shadow_06(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x06, size);
}

void __asan_set_shadow_07(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x07, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}
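
// Illustrative only: the byte patterns above mirror ASan's shadow encodings
// (0x00 addressable, 0x01-0x07 a partially addressable granule, 0xf1/0xf2/0xf3
// stack left/mid/right redzones, 0xf5 use-after-return, 0xf8 use-after-scope);
// instrumented code calls these helpers to bulk-set shadow for large stack
// frames instead of emitting all the stores inline.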
void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}
static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
                                uptr &old_beg, uptr &old_end, uptr &new_beg,
                                uptr &new_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_end))) {
    uptr end_down = RoundDownTo(storage_end, granularity);
    // Ignore the last unaligned granule if the storage is followed by an
    // unpoisoned byte, because we can't poison the prefix anyway. Don't call
    // AddressIsPoisoned at all if the container change does not affect the
    // last granule.
    if ((((old_end != new_end) && Max(old_end, new_end) > end_down) ||
         ((old_beg != new_beg) && Max(old_beg, new_beg) > end_down)) &&
        !AddressIsPoisoned(storage_end)) {
      old_beg = Min(end_down, old_beg);
      old_end = Min(end_down, old_end);
      new_beg = Min(end_down, new_beg);
      new_end = Min(end_down, new_end);
    }
  }

  // Handle misaligned begin and cut it off.
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_beg))) {
    uptr beg_up = RoundUpTo(storage_beg, granularity);
    // The first unaligned granule needs special handling only if we had bytes
    // there before and will have none after.
    if ((new_beg == new_end || new_beg >= beg_up) && old_beg != old_end &&
        old_beg < beg_up) {
      // Keep granule prefix outside of the storage unpoisoned.
      uptr beg_down = RoundDownTo(storage_beg, granularity);
      *(u8 *)MemToShadow(beg_down) = storage_beg - beg_down;
      old_beg = Max(beg_up, old_beg);
      old_end = Max(beg_up, old_end);
      new_beg = Max(beg_up, new_beg);
      new_end = Max(beg_up, new_end);
    }
  }
}
void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow)
    return;
  VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr storage_beg = reinterpret_cast<uptr>(beg_p);
  uptr storage_end = reinterpret_cast<uptr>(end_p);
  uptr old_end = reinterpret_cast<uptr>(old_mid_p);
  uptr new_end = reinterpret_cast<uptr>(new_mid_p);
  uptr old_beg = storage_beg;
  uptr new_beg = storage_beg;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (!(storage_beg <= old_end && storage_beg <= new_end &&
        old_end <= storage_end && new_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(storage_beg, storage_end,
                                                 old_end, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if (old_end == new_end)
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  uptr a = RoundDownTo(Min(old_end, new_end), granularity);
  uptr c = RoundUpTo(Max(old_end, new_end), granularity);
  uptr d1 = RoundDownTo(old_end, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://github.com/google/sanitizers/issues/258.
  // if (d1 != d2)
  //   CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  if (a + granularity <= d1)
    CHECK_EQ(*(u8 *)MemToShadow(a), 0);
  // if (d2 + granularity <= c && c <= end)
  //   CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //            kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_end, granularity);
  uptr b2 = RoundUpTo(new_end, granularity);
  // New state:
  // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  if (b1 > a)
    PoisonShadow(a, b1 - a, 0);
  else if (c > b2)
    PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8 *)MemToShadow(b1) = static_cast<u8>(new_end - b1);
  }
}
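
// Illustrative only (user-side code): a vector-like container calls the
// annotation above on every size change so ASan can flag reads past size(),
// e.g.
//   __sanitizer_annotate_contiguous_container(
//       data, data + capacity, data + old_size, data + new_size);
// afterwards [data + new_size, data + capacity) is poisoned as
// container-overflow.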
// Annotates a double-ended contiguous memory area like std::deque's chunk.
// It allows detecting buggy accesses to allocated but not used beginning
// or end items of such a container.
void __sanitizer_annotate_double_ended_contiguous_container(
    const void *storage_beg_p, const void *storage_end_p,
    const void *old_container_beg_p, const void *old_container_end_p,
    const void *new_container_beg_p, const void *new_container_end_p) {
  if (!flags()->detect_container_overflow)
    return;

  VPrintf(2, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
          storage_end_p, old_container_beg_p, old_container_end_p,
          new_container_beg_p, new_container_end_p);

  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr old_beg = reinterpret_cast<uptr>(old_container_beg_p);
  uptr old_end = reinterpret_cast<uptr>(old_container_end_p);
  uptr new_beg = reinterpret_cast<uptr>(new_container_beg_p);
  uptr new_end = reinterpret_cast<uptr>(new_container_end_p);

  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;

  if (!(old_beg <= old_end && new_beg <= new_end) ||
      !(storage_beg <= new_beg && new_end <= storage_end) ||
      !(storage_beg <= old_beg && old_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
        storage_beg, storage_end, old_beg, old_end, new_beg, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if ((old_beg == old_end && new_beg == new_end) ||
      (old_beg == new_beg && old_end == new_end))
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  // Handle non-intersecting new and old containers separately; the
  // intersecting case below is then simpler.
  if (old_beg == old_end || new_beg == new_end || new_end <= old_beg ||
      old_end <= new_beg) {
    if (old_beg != old_end) {
      // Poisoning the old container.
      uptr a = RoundDownTo(old_beg, granularity);
      uptr b = RoundUpTo(old_end, granularity);
      PoisonShadow(a, b - a, kAsanContiguousContainerOOBMagic);
    }

    if (new_beg != new_end) {
      // Unpoisoning the new container.
      uptr a = RoundDownTo(new_beg, granularity);
      uptr b = RoundDownTo(new_end, granularity);
      PoisonShadow(a, b - a, 0);
      if (!AddrIsAlignedByGranularity(new_end))
        *(u8 *)MemToShadow(b) = static_cast<u8>(new_end - b);
    }

    return;
  }

  // Intersection of old and new containers is not empty.
  CHECK_LT(new_beg, old_end);
  CHECK_GT(new_end, old_beg);

  if (new_beg < old_beg) {
    // Round down because we can't poison prefixes.
    uptr a = RoundDownTo(new_beg, granularity);
    // Round down and ignore [c, old_beg), since its state is defined by the
    // unchanged [old_beg, old_end).
    uptr c = RoundDownTo(old_beg, granularity);
    PoisonShadow(a, c - a, 0);
  } else if (new_beg > old_beg) {
    // Round down and poison [a, old_beg), because it was unpoisoned only as a
    // prefix.
    uptr a = RoundDownTo(old_beg, granularity);
    // Round down and ignore [c, new_beg), since its state is defined by the
    // unchanged [new_beg, old_end).
    uptr c = RoundDownTo(new_beg, granularity);

    PoisonShadow(a, c - a, kAsanContiguousContainerOOBMagic);
  }

  if (new_end > old_end) {
    // Round down to poison the prefix.
    uptr a = RoundDownTo(old_end, granularity);
    // Round down and handle the remainder below.
    uptr c = RoundDownTo(new_end, granularity);
    PoisonShadow(a, c - a, 0);
    if (!AddrIsAlignedByGranularity(new_end))
      *(u8 *)MemToShadow(c) = static_cast<u8>(new_end - c);
  } else if (new_end < old_end) {
    // Round up and handle the remainder below.
    uptr a2 = RoundUpTo(new_end, granularity);
    // Round up to poison the entire granule, as we had nothing in
    // [old_end, c2).
    uptr c2 = RoundUpTo(old_end, granularity);
    PoisonShadow(a2, c2 - a2, kAsanContiguousContainerOOBMagic);

    if (!AddrIsAlignedByGranularity(new_end)) {
      uptr a = RoundDownTo(new_end, granularity);
      *(u8 *)MemToShadow(a) = static_cast<u8>(new_end - a);
    }
  }
}
static const void *FindBadAddress(uptr begin, uptr end, bool poisoned) {
  CHECK_LE(begin, end);
  constexpr uptr kMaxRangeToCheck = 32;
  if (end - begin > kMaxRangeToCheck * 2) {
    if (auto *bad = FindBadAddress(begin, begin + kMaxRangeToCheck, poisoned))
      return bad;
    if (auto *bad = FindBadAddress(end - kMaxRangeToCheck, end, poisoned))
      return bad;
  }

  for (uptr i = begin; i < end; ++i)
    if (AddressIsPoisoned(i) != poisoned)
      return reinterpret_cast<const void *>(i);
  return nullptr;
}
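
// Illustrative only: for a large range, the recursion above first probes the
// 32 bytes at each edge (where container annotation bugs usually surface)
// before the loop falls back to a linear scan of the whole range.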
const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg_p, const void *mid_p, const void *end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);

  // If the byte after the storage is unpoisoned, everything in the granule
  // before must stay unpoisoned.
  uptr annotations_end =
      (!AddrIsAlignedByGranularity(end) && !AddressIsPoisoned(end))
          ? RoundDownTo(end, granularity)
          : end;
  beg = Min(beg, annotations_end);
  mid = Min(mid, annotations_end);
  if (auto *bad = FindBadAddress(beg, mid, false))
    return bad;
  if (auto *bad = FindBadAddress(mid, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, end, false);
}
int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
                                                           end_p) == nullptr;
}
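
// Illustrative only (user-side code): container tests can assert their own
// annotations, e.g. for a vector holding `size` live elements in a
// `capacity`-sized buffer:
//   assert(__sanitizer_verify_contiguous_container(data, data + size,
//                                                  data + capacity));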
const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr beg = reinterpret_cast<uptr>(container_beg_p);
  uptr end = reinterpret_cast<uptr>(container_end_p);

  // The prefix of the first granule of the container is unpoisoned.
  if (beg != end)
    beg = Max(storage_beg, RoundDownTo(beg, granularity));

  // If the byte after the storage is unpoisoned, the prefix of the last
  // granule is unpoisoned.
  uptr annotations_end = (!AddrIsAlignedByGranularity(storage_end) &&
                          !AddressIsPoisoned(storage_end))
                             ? RoundDownTo(storage_end, granularity)
                             : storage_end;
  storage_beg = Min(storage_beg, annotations_end);
  beg = Min(beg, annotations_end);
  end = Min(end, annotations_end);

  if (auto *bad = FindBadAddress(storage_beg, beg, true))
    return bad;
  if (auto *bad = FindBadAddress(beg, end, false))
    return bad;
  if (auto *bad = FindBadAddress(end, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, storage_end, false);
}
int __sanitizer_verify_double_ended_contiguous_container(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  return __sanitizer_double_ended_contiguous_container_find_bad_address(
             storage_beg_p, container_beg_p, container_end_p,
             storage_end_p) == nullptr;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}
// --- Implementation of LSan-specific functions --- {{{1
namespace __asan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __asan