//===-- asan_poisoning.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"

namespace __asan {

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (!flags()->poison_heap) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!flags()->poison_heap) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}
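
// Worked example for PoisonShadowPartialRightRedzone (illustrative, assuming
// SHADOW_GRANULARITY == 8 and the usual FastPoisonShadowPartialRightRedzone
// behavior): for size == 13 and redzone_size == 32, the first granule's
// shadow byte becomes 0 (fully addressable), the second records 5 (only the
// first 13 % 8 bytes are addressable), and the remaining redzone granules
// receive `value`.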

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, SHADOW_GRANULARITY)
  s8 value;  // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8 *)MemToShadow(address);
    offset = address & (SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};
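
// Worked example for ShadowSegmentEndpoint (illustrative, assuming
// SHADOW_GRANULARITY == 8): for address 0x1003, chunk points at the shadow
// byte covering the granule [0x1000, 0x1008), offset is 0x1003 & 7 == 3, and
// value caches the current contents of that shadow byte.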

void FlushUnneededASanShadowMemory(uptr p, uptr size) {
  // Since asan's mapping is compacting, the shadow chunk may not be
  // page-aligned, so we only flush the page-aligned portion.
  uptr page_size = GetPageSizeCached();
  uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
  uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}
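
// Illustrative arithmetic (assuming 4096-byte pages and the default 8:1
// shadow mapping): a 1 MiB user region maps to a 128 KiB shadow range, but
// only the page-aligned middle of that shadow range is released; up to one
// page at each end is kept because it may be shared with neighboring regions.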

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// Current implementation of __asan_(un)poison_memory_region doesn't check
// that user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively to make sure asan shadow
// mapping invariant is preserved (see detailed mapping description here:
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm).
//
// * if user asks to poison region [left, right), the program poisons
// at least [left, AlignDown(right)).
// * if user asks to unpoison region [left, right), the program unpoisons
// at most [AlignDown(left), right).
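//
// Illustrative caller-side sketch (not part of this file; the declarations
// come from the public <sanitizer/asan_interface.h> header):
//
//   char buf[16];
//   __asan_poison_memory_region(buf, sizeof(buf));    // buf is off-limits
//   // buf[0] = 0;                 // would be reported as use-after-poison
//   __asan_unpoison_memory_region(buf, sizeof(buf));  // buf is usable again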
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    // We unpoison the memory bytes up to end.offset if they are not
    // unpoisoned already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  if (beg.offset > 0) {
    *beg.chunk = 0;
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}

int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size) return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg)) return beg;
  if (!AddrIsInMem(end)) return end;
  uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) &&
      !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}
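
// Note: callers such as CHECK_SMALL_REGION below rely on the convention that
// 0 means "nothing poisoned" and any non-zero return is the address of the
// first poisoned byte in [beg, beg + size).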

#define CHECK_SMALL_REGION(p, size, isWrite)                  \
  do {                                                        \
    uptr __p = reinterpret_cast<uptr>(p);                     \
    uptr __size = size;                                       \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||            \
        __asan::AddressIsPoisoned(__p + __size - 1))) {       \
      GET_CURRENT_PC_BP_SP;                                   \
      uptr __bad = __asan_region_is_poisoned(__p, __size);    \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size);\
    }                                                         \
  } while (false)

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8 *shadow_end = (s8 *)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    *shadow_end = Max(end_value, end_offset);
  }
}
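
// Worked example for PoisonAlignedStackMemory (illustrative, assuming
// SHADOW_GRANULARITY == 8): for size == 21, aligned_size == 16, so two full
// granules are (un)poisoned via PoisonShadow and the partially covered third
// granule is handled through its shadow byte, with end_offset == 5 encoding
// how many of its bytes are addressable.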

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}

void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow) return;
  VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
  uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
  uptr granularity = SHADOW_GRANULARITY;
  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
        IsAligned(beg, granularity))) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
                                                 &stack);
  }
  CHECK_LE(end - beg,
           FIRST_32_SECOND_64(1UL << 30, 1UL << 34));  // Sanity check.

  uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
  uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
  uptr d1 = RoundDownTo(old_mid, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://code.google.com/p/address-sanitizer/issues/detail?id=258.
  // if (d1 != d2)
  //   CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  if (a + granularity <= d1)
    CHECK_EQ(*(u8*)MemToShadow(a), 0);
  // if (d2 + granularity <= c && c <= end)
  //   CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //            kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_mid, granularity);
  uptr b2 = RoundUpTo(new_mid, granularity);
  // New state:
  // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  PoisonShadow(a, b1 - a, 0);
  PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
  }
}
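
// Illustrative caller-side sketch (not part of this file; the declaration
// comes from the public <sanitizer/common_interface_defs.h> header). A
// vector-like container with storage [beg, end) that grows its initialized
// part from 5 to 7 bytes would report the change as:
//
//   __sanitizer_annotate_contiguous_container(beg, end,
//                                             beg + 5 /*old_mid*/,
//                                             beg + 7 /*new_mid*/);
//
// Afterwards [beg, beg + 7) is addressable and [beg + 7, end) is marked with
// kAsanContiguousContainerOOBMagic (modulo the partially covered granule at
// the boundary).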

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  if (!flags()->detect_container_overflow) return 1;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // Check some bytes starting from beg, some bytes around mid, and some bytes
  // ending with end.
  uptr kMaxRangeToCheck = 32;
  uptr r1_beg = beg;
  uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
  uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
  uptr r2_end = Min(end, mid + kMaxRangeToCheck);
  uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
  uptr r3_end = end;
  for (uptr i = r1_beg; i < r1_end; i++)
    if (AddressIsPoisoned(i))
      return 0;
  for (uptr i = r2_beg; i < mid; i++)
    if (AddressIsPoisoned(i))
      return 0;
  for (uptr i = mid; i < r2_end; i++)
    if (!AddressIsPoisoned(i))
      return 0;
  for (uptr i = r3_beg; i < r3_end; i++)
    if (!AddressIsPoisoned(i))
      return 0;
  return 1;
}
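
// Illustrative check (not part of this file): after the annotation sketch
// above, __sanitizer_verify_contiguous_container(beg, beg + 7, end) should
// return 1, while a mid pointer that disagrees with the annotated boundary
// (e.g. beg + 5) should make it return 0.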

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __lsan