#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);

static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);
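
/*
 * The field list of a syscall-entry event lives in its syscall metadata
 * (call->data) rather than in the event class itself.
 */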
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbols aliased with a
	 * "SyS" prefix instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif
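
/*
 * Scan the syscalls metadata section for the entry whose name matches the
 * symbol found at @syscall's address, ignoring the sys/SyS prefix.
 */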
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}

	return NULL;
}
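
/* Map a syscall number to its metadata; NULL if out of range or unmapped. */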
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}
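
/*
 * Format a sys_enter event as "name(arg: value, ...)".  Argument types are
 * printed as well when the verbose trace option is set.
 */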
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
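
/*
 * SYSCALL_FIELD() expands to the type/name/offset/size/sign arguments that
 * trace_define_field() expects.  The sizeof() comparison forces a link error
 * through __bad_type_size() if the declared type does not match the
 * corresponding member of the trace structure.
 */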
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

static int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}
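
/*
 * Syscall-exit events share one fixed print format; syscall-entry events get
 * a per-event format string built from their argument names.
 */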
static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}
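
/*
 * Describe the fields of a syscall-entry event: the syscall number followed
 * by one unsigned long per argument.
 */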
static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}
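
/*
 * Probe attached to the sys_enter tracepoint: record the syscall number and
 * its arguments into the ftrace ring buffer when tracing is enabled for it.
 */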
void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
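
/*
 * Probe attached to the sys_exit tracepoint: record the syscall number and
 * its return value.
 */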
void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
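
/*
 * Enable ftrace recording for one syscall-entry event.  The sys_enter
 * tracepoint probe is registered once, on the first enabled event, and the
 * per-syscall bit gates recording inside the probe itself.
 */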
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}
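
/*
 * Per-event init callback: allocate the print format and register the event
 * with the trace event core, provided the syscall metadata was mapped.
 */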
int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
			 ((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);
	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}
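
/*
 * Build the syscall-number -> metadata table at boot by resolving each
 * sys_call_table entry back to its metadata record.
 */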
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;
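
/*
 * perf variant of the sys_enter probe: emit the record into the perf buffer
 * for the events attached to this syscall.
 */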
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}
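
/* perf variant of the sys_exit probe: record the syscall number and return value. */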
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We could probably compute this size at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future.
	 * How can this check be moved out of the runtime path?
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */
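
/*
 * reg callback of the syscall-entry event class: dispatch ftrace and perf
 * (un)registration requests to the helpers above.
 */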
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}