#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"		/* for trace_flags / TRACE_ITER_VERBOSE */
static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;
#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscalls symbols aliases prefixed
	 * with "SyS" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif
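/*
 * For example, a wrapped symbol "SyS_read" and the metadata name
 * "sys_read" both compare as "_read" once the three-byte prefix is
 * skipped, so a wrapper alias still matches its metadata entry.
 */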
#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel, do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore it.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}
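/*
 * The metadata records are laid down in the __syscalls_metadata
 * section by the SYSCALL_METADATA()/SYSCALL_DEFINEx() macros; the
 * linear scan above resolves a syscall table address back to its
 * record by symbol name, once, at boot time.
 */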
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
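/*
 * An enter event thus renders roughly as (argument names taken from
 * the syscall metadata, values in hex):
 *
 *	sys_read(fd: 3, buf: 7fffabcd1000, count: 400)
 */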
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
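/* An exit event then renders as, e.g., "sys_read -> 0x400". */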
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)
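/*
 * __bad_type_size() is deliberately declared but never defined: if a
 * field's declared type does not match the size of the struct member,
 * the ternary condition is a non-zero constant, the call survives
 * constant folding, and the build fails at link time.  It acts as a
 * compile-time assertion dressed up as an expression.
 */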
static int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}
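/*
 * For a two-argument syscall on a 64-bit arch (sizeof(unsigned long)
 * == 8, so "0x%%0%zulx" expands to "0x%08lx"), this builds a
 * print_fmt along the lines of (field names illustrative):
 *
 *	"fd: 0x%08lx, buf: 0x%08lx",
 *	((unsigned long)(REC->fd)), ((unsigned long)(REC->buf))
 */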
static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}
static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	/* only enter events own a kmalloc'ed print_fmt */
	if (entry->enter_event == call)
		kfree(call->print_fmt);
}
static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}
static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}
static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
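/*
 * Both handlers above and below follow the ring buffer's
 * reserve/commit protocol: a slot of the exact event size is
 * reserved, filled in place, then either committed or, when the
 * event filter rejects it, discarded by filter_current_check_discard()
 * before it ever becomes visible to readers.
 */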
static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
static int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
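/*
 * Only the first enabled syscall event attaches the handler to the
 * global sys_enter tracepoint; subsequent ones merely set their bit
 * in the bitmap and bump the refcount under syscall_trace_lock.
 */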
static void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}
static int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
static void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}
static int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);
	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}
struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}
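/*
 * Weak default: architectures whose syscall table is not a plain
 * array of handler addresses (or is named differently) override this
 * to return the handler address for syscall nr.
 */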
static int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
early_initcall(init_ftrace_syscalls);
#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
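/*
 * The size arithmetic keeps the record u64-aligned once perf prepends
 * its u32 size field: e.g. a raw size of 42 bytes is padded to 44, so
 * that 44 + sizeof(u32) = 48 is a multiple of sizeof(u64).
 */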
static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point\n");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* This could probably be computed at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Exceeding this should be impossible, but stay paranoid about
	 * future growth; ideally this would be a build-time check.
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point\n");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */
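/*
 * The two .reg callbacks below dispatch on the trace_reg type so that
 * a single entry point serves both the ftrace and (when configured)
 * perf paths; the PERF_OPEN/CLOSE/ADD/DEL operations need no
 * per-event work here and simply succeed.
 */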
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}