// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>
/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4
/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is
 * allowed); see the usage sketch after this struct.
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's for KCOV_MODE_TRACE). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
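/*
 * A minimal user-space usage sketch of the state machine above (headers and
 * error handling omitted; the uapi constants come from <linux/kcov.h>):
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	// Size is given in unsigned longs, not bytes.
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	cover[0] = 0;			// reset the PC counter
 *	// ... issue the syscall under test ...
 *	unsigned long n = cover[0];	// number of PCs collected
 *	for (unsigned long i = 1; i <= n; i++)
 *		printf("0x%lx\n", cover[i]);
 *	ioctl(fd, KCOV_DISABLE, 0);
 */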
static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_ioctl_locked().
	 */
	barrier();
	return mode == needed_mode;
}

static unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}
/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
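/*
 * Note on the PC buffer layout as seen from user space: word 0 holds the
 * number of PCs recorded so far, words 1..N hold the PCs themselves. A
 * typical fuzzer resets word 0 to zero before issuing the syscall under
 * test and reads back words 1..cover[0] afterwards.
 */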
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}
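/*
 * In KCOV_TRACE_CMP mode the shared buffer is thus interpreted as an array
 * of u64: word 0 is the record count, and each record occupies
 * KCOV_WORDS_PER_CMP consecutive words in the order (type, arg1, arg2, PC).
 */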
void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
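/*
 * For switch statements the compiler emits a call with the switched-on
 * value and a descriptor array: cases[0] is the number of case constants,
 * cases[1] is the size of the compared value in bits, and cases[2..] are
 * the case constants themselves; each one is reported below as a separate
 * comparison against val.
 */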
void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}
void kcov_task_init(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}
void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}
static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}
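/*
 * From user space the mapping must mirror the KCOV_INIT_TRACE size exactly:
 * offset 0 and a length of size * sizeof(unsigned long) bytes; anything
 * else is rejected with -EINVAL above. The documented usage maps the
 * buffer shared and read/write.
 */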
static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}
static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}
/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * memory, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode,
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		if (arg == KCOV_TRACE_PC)
			kcov->mode = KCOV_MODE_TRACE_PC;
		else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
			kcov->mode = KCOV_MODE_TRACE_CMP;
#else
			return -ENOTSUPP;
#endif
		else
			return -EINVAL;
		kcov_fault_in_area(kcov);
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in check_kcov_mode(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov->mode = KCOV_MODE_INIT;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
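/*
 * Note that KCOV_ENABLE and KCOV_DISABLE act on the calling task only: a
 * descriptor can be bound to at most one task at a time (kcov->t), and a
 * task can have at most one active descriptor (t->kcov). The reference
 * taken at enable time is dropped either in KCOV_DISABLE above or in
 * kcov_task_exit().
 */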
static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}
static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};
static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}

	return 0;
}

device_initcall(kcov_init);