libsanitizer/asan/asan_poisoning.cc

//===-- asan_poisoning.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"

namespace __asan {

static atomic_uint8_t can_poison_memory;

void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8*)MemToShadow(address);
    offset = address & (SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};
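
// Illustration (not part of the upstream file): with the usual 8-byte
// SHADOW_GRANULARITY, a shadow byte of 0 means the whole 8-byte chunk is
// addressable, a value k in (0, 8) means only the first k bytes are, and a
// negative value is a poison magic.  A hedged sketch of how an endpoint's
// fields combine, mirroring the kAccessSize == 1 case of AddressIsPoisoned
// (the helper name is hypothetical):
//
//   bool EndpointIsAddressable(const ShadowSegmentEndpoint &e) {
//     return e.value == 0 || (e.value > 0 && e.offset < e.value);
//   }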

void FlushUnneededASanShadowMemory(uptr p, uptr size) {
  // Since asan's mapping is compacting, the shadow chunk may not be
  // page-aligned, so we only flush the page-aligned portion.
  uptr page_size = GetPageSizeCached();
  uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
  uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (Verbosity()) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", ptr, end, size);
    if (Verbosity() >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);
  CHECK_LE(size, 4096);
  CHECK(IsAligned(end, SHADOW_GRANULARITY));
  if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
    *(u8 *)MemToShadow(ptr) =
        poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
    ptr |= SHADOW_GRANULARITY - 1;
    ptr++;
  }
  for (; ptr < end; ptr += SHADOW_GRANULARITY)
    *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// Current implementation of __asan_(un)poison_memory_region doesn't check
// that user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively to make sure asan shadow
// mapping invariant is preserved (see detailed mapping description here:
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm).
//
// * if user asks to poison region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * if user asks to unpoison region [left, right), the program unpoisons
//   at most [AlignDown(left), right).
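//
// Illustration (not part of the upstream file): a hedged sketch of how user
// code typically drives these entry points through the public header
// <sanitizer/asan_interface.h>; the buffer and sizes below are made up.
// (The ASAN_POISON_MEMORY_REGION / ASAN_UNPOISON_MEMORY_REGION macros in the
// same header compile away when ASan is not enabled.)
//
//   char *buf = new char[64];
//   __asan_poison_memory_region(buf + 16, 32);    // [buf+16, buf+48) unusable
//   // buf[20] = 1;   // would now be reported as a use-after-poison
//   __asan_unpoison_memory_region(buf + 16, 32);  // addressable again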
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if the byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    // We unpoison memory bytes up to end.offset if it is not
    // unpoisoned already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  if (beg.offset > 0) {
    *beg.chunk = 0;
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}

int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size) return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg)) return beg;
  if (!AddrIsInMem(end)) return end;
  CHECK_LT(beg, end);
  uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) &&
      !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}
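
// Illustration (not part of the upstream file): a hedged sketch of querying
// these entry points from a test through <sanitizer/asan_interface.h>; the
// 13-byte heap allocation is made up.
//
//   #include <cassert>
//   #include <sanitizer/asan_interface.h>
//   char *p = new char[13];
//   assert(__asan_region_is_poisoned(p, 13) == nullptr);  // fully addressable
//   assert(__asan_address_is_poisoned(p + 13));           // heap redzone byte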

#define CHECK_SMALL_REGION(p, size, isWrite)                  \
  do {                                                        \
    uptr __p = reinterpret_cast<uptr>(p);                     \
    uptr __size = size;                                       \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||            \
        __asan::AddressIsPoisoned(__p + __size - 1))) {       \
      GET_CURRENT_PC_BP_SP;                                   \
      uptr __bad = __asan_region_is_poisoned(__p, __size);    \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\
    }                                                         \
  } while (false);

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}
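
// Illustration (not part of the upstream file): a hedged sketch of reading a
// potentially misaligned 32-bit field out of a byte buffer through the
// checked entry point declared in <sanitizer/common_interface_defs.h>; the
// buffer layout is made up.
//
//   unsigned char packet[16] = {};
//   // Loads 4 bytes starting at offset 3; the first and last byte of the
//   // access are checked against the shadow before the load is performed.
//   uint32_t field = __sanitizer_unaligned_load32(packet + 3);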

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8*>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are going to get a double-free. So, return 0 to avoid
  // an infinite loop of destructors. We don't want to report a double-free
  // here though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from freed memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}
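
// Illustration (not part of the upstream file): the "array cookie" is the
// element count that the Itanium C++ ABI stores immediately before an array
// allocated with new[] when the element type has a non-trivial destructor,
// so that delete[] knows how many destructors to run.  A hedged sketch of
// the layout being protected here:
//
//   struct S { ~S(); };
//   S *a = new S[5];
//   // Heap layout (roughly): [cookie: 5][a[0]][a[1]]...[a[4]]
//   // The shadow of the cookie word is set to kAsanArrayCookieMagic, and
//   // delete[] a; reads the count back through __asan_load_cxx_array_cookie.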

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}
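
// Illustration (not part of the upstream file): these entry points back the
// stack-use-after-scope check.  A hedged sketch of the instrumentation the
// compiler conceptually emits around a lexical scope (the exact calls and
// sizes are generated, not written by hand):
//
//   {
//     int x;                                        // scope of x begins
//     // __asan_unpoison_stack_memory(&x, sizeof x);
//     p = &x;
//   }
//   // __asan_poison_stack_memory(&x, sizeof x);    // scope of x ended
//   *p = 1;  // reported as stack-use-after-scope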

void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow) return;
  VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
  uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
  uptr granularity = SHADOW_GRANULARITY;
  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
        IsAligned(beg, granularity))) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
                                                 &stack);
  }
  CHECK_LE(end - beg,
           FIRST_32_SECOND_64(1UL << 30, 1UL << 34));  // Sanity check.

  uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
  uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
  uptr d1 = RoundDownTo(old_mid, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://code.google.com/p/address-sanitizer/issues/detail?id=258.
  // if (d1 != d2)
  //   CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  if (a + granularity <= d1)
    CHECK_EQ(*(u8*)MemToShadow(a), 0);
  // if (d2 + granularity <= c && c <= end)
  //   CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //            kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_mid, granularity);
  uptr b2 = RoundUpTo(new_mid, granularity);
  // New state:
  // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  PoisonShadow(a, b1 - a, 0);
  PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
  }
}
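
// Illustration (not part of the upstream file): a hedged sketch of how a
// vector-like container annotates its unused capacity through
// <sanitizer/common_interface_defs.h>; the member names are made up.
//
//   // Storage: [data, data + capacity), live elements: [data, data + size).
//   void PushBack(const T &v) {
//     __sanitizer_annotate_contiguous_container(
//         data, data + capacity, data + size, data + size + 1);
//     data[size++] = v;
//   }
//   void PopBack() {
//     __sanitizer_annotate_contiguous_container(
//         data, data + capacity, data + size, data + size - 1);
//     --size;
//   }
//
// In debug code, __sanitizer_verify_contiguous_container(data, data + size,
// data + capacity) can be used to assert that the annotation is consistent.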

const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg_p, const void *mid_p, const void *end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // Check some bytes starting from beg, some bytes around mid, and some bytes
  // ending with end.
  uptr kMaxRangeToCheck = 32;
  uptr r1_beg = beg;
  uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
  uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
  uptr r2_end = Min(end, mid + kMaxRangeToCheck);
  uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
  uptr r3_end = end;
  for (uptr i = r1_beg; i < r1_end; i++)
    if (AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = r2_beg; i < mid; i++)
    if (AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = mid; i < r2_end; i++)
    if (!AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = r3_beg; i < r3_end; i++)
    if (!AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  return nullptr;
}

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
                                                           end_p) == nullptr;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __lsan