[official-gcc.git] / libsanitizer / asan / asan_poisoning.cc
//===-- asan_poisoning.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"

namespace __asan {

static atomic_uint8_t can_poison_memory;

void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8*)MemToShadow(address);
    offset = address & (SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};
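
// Worked example (illustrative comment, not present in the original source):
// with SHADOW_GRANULARITY == 8, a shadow byte of 0 means all 8 application
// bytes it covers are addressable, a value k in [1, 7] means only the first
// k bytes are addressable, and a negative value (a poison magic) means none
// are. For an address like 0x1007 the endpoint decomposes as:
//
//   ShadowSegmentEndpoint e(0x1007);
//   // e.chunk  -> shadow byte covering [0x1000, 0x1008)
//   // e.offset == 7   (0x1007 & (SHADOW_GRANULARITY - 1))
//   // e.value  == *e.chunk, the current encoding of that granule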

void FlushUnneededASanShadowMemory(uptr p, uptr size) {
  // Since asan's mapping is compacting, the shadow chunk may not be
  // page-aligned, so we only flush the page-aligned portion.
  ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}

void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (Verbosity()) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", ptr, end, size);
    if (Verbosity() >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);
  CHECK_LE(size, 4096);
  CHECK(IsAligned(end, SHADOW_GRANULARITY));
  if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
    *(u8 *)MemToShadow(ptr) =
        poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
    ptr |= SHADOW_GRANULARITY - 1;
    ptr++;
  }
  for (; ptr < end; ptr += SHADOW_GRANULARITY)
    *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The current implementation of __asan_(un)poison_memory_region doesn't check
// that the user program (un)poisons only memory it owns. It poisons memory
// conservatively, and unpoisons progressively, to make sure the asan shadow
// mapping invariant is preserved (see the detailed mapping description here:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * If the user asks to poison the region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * If the user asks to unpoison the region [left, right), the program
//   unpoisons at most [AlignDown(left), right).
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if the byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if it is not
    // unpoisoned already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    *beg.chunk = 0;
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}
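
// Usage sketch (illustrative, not part of this file): user code built with
// -fsanitize=address can reach these entry points through the macros in
// <sanitizer/asan_interface.h>, e.g. to fence off the unused tail of a
// custom allocator's buffer:
//
//   char buf[128];
//   ASAN_POISON_MEMORY_REGION(buf + 64, 64);    // tail now traps on access
//   // ... later, before reusing the tail ...
//   ASAN_UNPOISON_MEMORY_REGION(buf + 64, 64);  // addressable again
//
// Per the comment above, poisoning is conservative (at least
// [left, AlignDown(right)) becomes poisoned) and unpoisoning is progressive
// (at most [AlignDown(left), right) becomes unpoisoned).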

int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size) return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg)) return beg;
  if (!AddrIsInMem(end)) return end;
  CHECK_LT(beg, end);
  uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) &&
      !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}
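
// Return-value convention, illustrated (comment added for clarity, not in the
// original file): __asan_region_is_poisoned yields 0 when the whole region is
// addressable and the address of the first bad byte otherwise, so callers can
// write:
//
//   if (uptr bad = __asan_region_is_poisoned((uptr)p, n))
//     Report("first poisoned byte at %p\n", (void *)bad);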

#define CHECK_SMALL_REGION(p, size, isWrite)                  \
  do {                                                        \
    uptr __p = reinterpret_cast<uptr>(p);                     \
    uptr __size = size;                                       \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||            \
        __asan::AddressIsPoisoned(__p + __size - 1))) {       \
      GET_CURRENT_PC_BP_SP;                                   \
      uptr __bad = __asan_region_is_poisoned(__p, __size);    \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \
    }                                                         \
  } while (false)

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}
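
// Usage sketch (illustrative, not part of this file): these helpers let code
// read or write potentially misaligned scalars while still getting both ends
// of the access checked, e.g. pulling a length field out of a packed wire
// buffer:
//
//   const char *pkt = ...;  // start of a received packet
//   u32 len = __sanitizer_unaligned_load32((const uu32 *)(pkt + 1));
//
// CHECK_SMALL_REGION looks at the first and last byte of the access; if either
// is poisoned, the precise offending address is recovered via
// __asan_region_is_poisoned and reported through __asan_report_error.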

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8*>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are about to get a double-free. So, return 0 to avoid
  // an infinite loop of destructors. We don't want to report a double-free
  // here though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from free-d memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}
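
// Background sketch (illustrative, not part of the original file): for array
// types with a non-trivial destructor, new[] stores the element count (the
// "array cookie") in a word just before the first element, and delete[] reads
// it back to know how many destructors to run:
//
//   struct T { ~T(); };
//   T *a = new T[10];
//   // heap layout: [ cookie == 10 ][ a[0] ... a[9] ]
//   delete[] a;  // loads the cookie, then destroys 10 elements
//
// Instrumented code is expected to call __asan_poison_cxx_array_cookie after
// writing the cookie and __asan_load_cxx_array_cookie instead of a plain load,
// so a clobbered or already-freed cookie is caught before destructors run.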

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}
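
// Worked example (illustrative, not part of the original file): poisoning a
// 13-byte region at an aligned addr with SHADOW_GRANULARITY == 8 gives
// aligned_size == 8, so the first shadow byte is poisoned outright and
// end_offset == 5 for the trailing partial granule. That last granule is
// fully poisoned only if its current value says at most 5 bytes are
// addressable (0 < end_value <= 5); if end_value is 0 (all 8 addressable) or
// greater than 5, live bytes beyond the region would be caught in the blast
// radius, so the shadow byte is left untouched.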

void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}
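
// Note (editorial comment, not in the original file): addr here is already a
// shadow address; the compiler emits calls to these helpers from function
// prologues/epilogues instead of open-coding the shadow memsets. The byte
// patterns appear to match the stack shadow magics in asan_internal.h: 0xf1
// left, 0xf2 middle and 0xf3 right stack redzone, 0xf5 use-after-return,
// 0xf8 use-after-scope.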

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}

void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow) return;
  VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
  uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
  uptr granularity = SHADOW_GRANULARITY;
  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
        IsAligned(beg, granularity))) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
                                                 &stack);
  }
  CHECK_LE(end - beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 34));  // Sanity check.

  uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
  uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
  uptr d1 = RoundDownTo(old_mid, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://github.com/google/sanitizers/issues/258.
  // if (d1 != d2)
  //   CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  if (a + granularity <= d1)
    CHECK_EQ(*(u8*)MemToShadow(a), 0);
  // if (d2 + granularity <= c && c <= end)
  //   CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //            kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_mid, granularity);
  uptr b2 = RoundUpTo(new_mid, granularity);
  // New state:
  // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  PoisonShadow(a, b1 - a, 0);
  PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
  }
}
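
// Usage sketch (illustrative, not part of this file): a vector-like container
// with storage [data, data + capacity) and `size` elements in use keeps its
// unused capacity poisoned by calling, on every size change:
//
//   __sanitizer_annotate_contiguous_container(
//       data,                  // beg:     start of storage
//       data + capacity,       // end:     end of storage
//       data + old_size,       // old_mid: previous end of the used part
//       data + new_size);      // new_mid: new end of the used part
//
// This mirrors how libc++'s std::vector annotates itself when container
// annotations are enabled; any access to [new_mid, end) is then reported as a
// container-overflow.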

const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg_p, const void *mid_p, const void *end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // Check some bytes starting from beg, some bytes around mid, and some bytes
  // ending at end.
  uptr kMaxRangeToCheck = 32;
  uptr r1_beg = beg;
  uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
  uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
  uptr r2_end = Min(end, mid + kMaxRangeToCheck);
  uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
  uptr r3_end = end;
  for (uptr i = r1_beg; i < r1_end; i++)
    if (AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = r2_beg; i < mid; i++)
    if (AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = mid; i < r2_end; i++)
    if (!AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = r3_beg; i < r3_end; i++)
    if (!AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  return nullptr;
}

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
                                                           end_p) == nullptr;
}
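
// Verification sketch (illustrative, not part of this file): a container test
// can assert that the annotation invariant holds, i.e. [beg, mid) is
// addressable and [mid, end) is poisoned:
//
//   assert(__sanitizer_verify_contiguous_container(
//       data, data + size, data + capacity));
//   // On failure, __sanitizer_contiguous_container_find_bad_address() with
//   // the same arguments pinpoints the first offending byte.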

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);