mm/kasan/kasan.c

/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/kasan.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}
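
/*
 * Unpoisons the shadow for 'size' bytes starting from 'address'. If 'size'
 * is not a multiple of KASAN_SHADOW_SCALE_SIZE, the last shadow byte records
 * how many leading bytes of the final granule are accessible.
 */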
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */
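
/*
 * Shadow encoding used by the checks below: 0 means the whole
 * KASAN_SHADOW_SCALE_SIZE-byte granule is accessible, a value from 1 to
 * KASAN_SHADOW_SCALE_SIZE - 1 means only that many leading bytes are
 * accessible, and negative values mark the granule as poisoned.
 */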
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}
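
/*
 * Returns the address of the first non-zero byte in [start, start + size),
 * or 0 if all 'size' bytes are zero.
 */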
static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}
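
/*
 * Scans [start, end) for a non-zero byte, reading a word at a time once the
 * pointer is 8-byte aligned. Returns the address of the first non-zero byte
 * found, or 0 if the whole range is zero.
 */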
static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}
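
/*
 * Checks an arbitrarily sized access by scanning the corresponding shadow
 * range. A non-zero shadow byte is only a bug if it is not the last shadow
 * byte of the range, or if the last accessed byte falls beyond the
 * accessible part of a partially poisoned granule.
 */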
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
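
/*
 * Entry point for every instrumented access: reports accesses that fall
 * below the shadow-covered address range, then checks the shadow for the
 * accessed bytes and reports if any of them are poisoned.
 */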
static __always_inline void check_memory_region(unsigned long addr,
						size_t size, bool write)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, _RET_IP_);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, _RET_IP_);
}
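
/*
 * The kernel's memset/memmove/memcpy are replaced with checking versions:
 * the source and destination ranges are validated via __asan_loadN() and
 * __asan_storeN() before delegating to the uninstrumented __mem*
 * implementations.
 */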
void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	__asan_storeN((unsigned long)addr, len);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memcpy(dest, src, len);
}
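
/*
 * Page allocator hooks: freshly allocated (non-highmem) pages get their
 * shadow unpoisoned, and freed pages are poisoned with KASAN_FREE_PAGE so
 * that later accesses are reported.
 */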
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}
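
/*
 * Slab hooks: a new slab page starts out fully poisoned as redzone; the
 * per-object shadow is then unpoisoned/poisoned as objects are set up and
 * torn down by the slab allocator.
 */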
void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}
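
/*
 * Object allocation unpoisons the whole object via kasan_kmalloc(); freeing
 * poisons it with KASAN_KMALLOC_FREE, except for SLAB_DESTROY_BY_RCU caches
 * whose objects may legitimately be accessed until the grace period ends.
 */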
void kasan_slab_alloc(struct kmem_cache *cache, void *object)
{
	kasan_kmalloc(cache, object, cache->object_size);
}

void kasan_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}
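
/*
 * Unpoisons the first 'size' bytes of the object and poisons the remainder
 * of the in-object area, rounded up to shadow granules, as a kmalloc
 * redzone.
 */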
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}
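
/*
 * krealloc()/kfree() entry points: the pointer may belong either to a slab
 * cache or directly to the page allocator, so dispatch on PageSlab().
 */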
void kasan_krealloc(const void *object, size_t size)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size);
	else
		kasan_kmalloc(page->slab_cache, object, size);
}

void kasan_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}
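
/*
 * Module support: shadow memory for module space is allocated on demand
 * with vmalloc, tracked via VM_KASAN on the module mapping, and released
 * again when that mapping is freed.
 */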
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}
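
/*
 * Globals instrumentation: for each compiler-emitted kasan_global descriptor
 * the variable itself is unpoisoned and its trailing redzone is poisoned
 * with KASAN_GLOBAL_REDZONE.
 */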
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);
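
/*
 * Generates the fixed-size __asan_load##size()/__asan_store##size() entry
 * points called by compiler instrumentation, plus *_noabort variants that
 * are plain aliases of the same checks.
 */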
#define DEFINE_ASAN_LOAD_STORE(size)				\
	void __asan_load##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, false);		\
	}							\
	EXPORT_SYMBOL(__asan_load##size);			\
	__alias(__asan_load##size)				\
	void __asan_load##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_load##size##_noabort);		\
	void __asan_store##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, true);		\
	}							\
	EXPORT_SYMBOL(__asan_store##size);			\
	__alias(__asan_store##size)				\
	void __asan_store##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);
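
/*
 * Shadow memory is not allocated for hot-added memory, so refuse to online
 * new memory blocks while KASAN is enabled.
 */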
#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_err("WARNING: KASAN doesn't support memory hot-add\n");
	pr_err("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif