1 // SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Copyright (c) 2020 Google, Inc.
 */
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/sched/clock.h>
#include <linux/stackdepot.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>

#include "kasan.h"
#include "../slab.h"
25 #define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)
27 enum kasan_arg_stacktrace
{
28 KASAN_ARG_STACKTRACE_DEFAULT
,
29 KASAN_ARG_STACKTRACE_OFF
,
30 KASAN_ARG_STACKTRACE_ON
,
33 static enum kasan_arg_stacktrace kasan_arg_stacktrace __initdata
;
35 /* Whether to collect alloc/free stack traces. */
36 DEFINE_STATIC_KEY_TRUE(kasan_flag_stacktrace
);
38 /* Non-zero, as initial pointer values are 0. */
39 #define STACK_RING_BUSY_PTR ((void *)1)
41 struct kasan_stack_ring stack_ring
= {
42 .lock
= __RW_LOCK_UNLOCKED(stack_ring
.lock
)
45 /* kasan.stacktrace=off/on */
46 static int __init
early_kasan_flag_stacktrace(char *arg
)
51 if (!strcmp(arg
, "off"))
52 kasan_arg_stacktrace
= KASAN_ARG_STACKTRACE_OFF
;
53 else if (!strcmp(arg
, "on"))
54 kasan_arg_stacktrace
= KASAN_ARG_STACKTRACE_ON
;
60 early_param("kasan.stacktrace", early_kasan_flag_stacktrace
);
62 /* kasan.stack_ring_size=<number of entries> */
63 static int __init
early_kasan_flag_stack_ring_size(char *arg
)
68 return kstrtoul(arg
, 0, &stack_ring
.size
);
70 early_param("kasan.stack_ring_size", early_kasan_flag_stack_ring_size
);
72 void __init
kasan_init_tags(void)
74 switch (kasan_arg_stacktrace
) {
75 case KASAN_ARG_STACKTRACE_DEFAULT
:
76 /* Default is specified by kasan_flag_stacktrace definition. */
78 case KASAN_ARG_STACKTRACE_OFF
:
79 static_branch_disable(&kasan_flag_stacktrace
);
81 case KASAN_ARG_STACKTRACE_ON
:
82 static_branch_enable(&kasan_flag_stacktrace
);
86 if (kasan_stack_collection_enabled()) {
88 stack_ring
.size
= KASAN_STACK_RING_SIZE_DEFAULT
;
89 stack_ring
.entries
= memblock_alloc(
90 sizeof(stack_ring
.entries
[0]) * stack_ring
.size
,
92 if (WARN_ON(!stack_ring
.entries
))
93 static_branch_disable(&kasan_flag_stacktrace
);
97 static void save_stack_info(struct kmem_cache
*cache
, void *object
,
98 gfp_t gfp_flags
, bool is_free
)
101 depot_stack_handle_t stack
, old_stack
;
103 struct kasan_stack_ring_entry
*entry
;
106 stack
= kasan_save_stack(gfp_flags
,
107 STACK_DEPOT_FLAG_CAN_ALLOC
| STACK_DEPOT_FLAG_GET
);
110 * Prevent save_stack_info() from modifying stack ring
111 * when kasan_complete_mode_report_info() is walking it.
113 read_lock_irqsave(&stack_ring
.lock
, flags
);
116 pos
= atomic64_fetch_add(1, &stack_ring
.pos
);
117 entry
= &stack_ring
.entries
[pos
% stack_ring
.size
];
119 /* Detect stack ring entry slots that are being written to. */
120 old_ptr
= READ_ONCE(entry
->ptr
);
121 if (old_ptr
== STACK_RING_BUSY_PTR
)
122 goto next
; /* Busy slot. */
123 if (!try_cmpxchg(&entry
->ptr
, &old_ptr
, STACK_RING_BUSY_PTR
))
124 goto next
; /* Busy slot. */
126 old_stack
= entry
->track
.stack
;
128 entry
->size
= cache
->object_size
;
129 kasan_set_track(&entry
->track
, stack
);
130 entry
->is_free
= is_free
;
134 read_unlock_irqrestore(&stack_ring
.lock
, flags
);
137 stack_depot_put(old_stack
);
140 void kasan_save_alloc_info(struct kmem_cache
*cache
, void *object
, gfp_t flags
)
142 save_stack_info(cache
, object
, flags
, false);
145 void kasan_save_free_info(struct kmem_cache
*cache
, void *object
)
147 save_stack_info(cache
, object
, 0, true);