1 #include <trace/syscall.h>
2 #include <trace/events/syscalls.h>
3 #include <linux/slab.h>
4 #include <linux/kernel.h>
5 #include <linux/ftrace.h>
6 #include <linux/perf_event.h>
7 #include <asm/syscall.h>
9 #include "trace_output.h"
/* Serializes registration/unregistration of the syscall tracepoints. */
static DEFINE_MUTEX(syscall_trace_lock);

/* How many events currently use the sys_enter / sys_exit tracepoints. */
static int sys_refcount_enter;
static int sys_refcount_exit;

/* One bit per syscall nr: ftrace tracing enabled for that syscall? */
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
/* Forward declarations: registration and field-definition callbacks. */
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type);

static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);
26 static struct list_head
*
27 syscall_get_enter_fields(struct ftrace_event_call
*call
)
29 struct syscall_metadata
*entry
= call
->data
;
31 return &entry
->enter_fields
;
34 struct trace_event_functions enter_syscall_print_funcs
= {
35 .trace
= print_syscall_enter
,
38 struct trace_event_functions exit_syscall_print_funcs
= {
39 .trace
= print_syscall_exit
,
42 struct ftrace_event_class event_class_syscall_enter
= {
44 .reg
= syscall_enter_register
,
45 .define_fields
= syscall_enter_define_fields
,
46 .get_fields
= syscall_get_enter_fields
,
47 .raw_init
= init_syscall_trace
,
50 struct ftrace_event_class event_class_syscall_exit
= {
52 .reg
= syscall_exit_register
,
53 .define_fields
= syscall_exit_define_fields
,
54 .fields
= LIST_HEAD_INIT(event_class_syscall_exit
.fields
),
55 .raw_init
= init_syscall_trace
,
/* Linker-section bounds of the per-syscall metadata records. */
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

/* Indexed by syscall nr; populated at boot by init_ftrace_syscalls(). */
static struct syscall_metadata **syscalls_metadata;
63 #ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
64 static inline bool arch_syscall_match_sym_name(const char *sym
, const char *name
)
67 * Only compare after the "sys" prefix. Archs that use
68 * syscall wrappers may have syscalls symbols aliases prefixed
69 * with "SyS" instead of "sys", leading to an unwanted
72 return !strcmp(sym
+ 3, name
+ 3);
76 static __init
struct syscall_metadata
*
77 find_syscall_meta(unsigned long syscall
)
79 struct syscall_metadata
**start
;
80 struct syscall_metadata
**stop
;
81 char str
[KSYM_SYMBOL_LEN
];
84 start
= __start_syscalls_metadata
;
85 stop
= __stop_syscalls_metadata
;
86 kallsyms_lookup(syscall
, NULL
, NULL
, NULL
, str
);
88 if (arch_syscall_match_sym_name(str
, "sys_ni_syscall"))
91 for ( ; start
< stop
; start
++) {
92 if ((*start
)->name
&& arch_syscall_match_sym_name(str
, (*start
)->name
))
98 static struct syscall_metadata
*syscall_nr_to_meta(int nr
)
100 if (!syscalls_metadata
|| nr
>= NR_syscalls
|| nr
< 0)
103 return syscalls_metadata
[nr
];
107 print_syscall_enter(struct trace_iterator
*iter
, int flags
,
108 struct trace_event
*event
)
110 struct trace_seq
*s
= &iter
->seq
;
111 struct trace_entry
*ent
= iter
->ent
;
112 struct syscall_trace_enter
*trace
;
113 struct syscall_metadata
*entry
;
116 trace
= (typeof(trace
))ent
;
118 entry
= syscall_nr_to_meta(syscall
);
123 if (entry
->enter_event
->event
.type
!= ent
->type
) {
128 ret
= trace_seq_printf(s
, "%s(", entry
->name
);
130 return TRACE_TYPE_PARTIAL_LINE
;
132 for (i
= 0; i
< entry
->nb_args
; i
++) {
133 /* parameter types */
134 if (trace_flags
& TRACE_ITER_VERBOSE
) {
135 ret
= trace_seq_printf(s
, "%s ", entry
->types
[i
]);
137 return TRACE_TYPE_PARTIAL_LINE
;
139 /* parameter values */
140 ret
= trace_seq_printf(s
, "%s: %lx%s", entry
->args
[i
],
142 i
== entry
->nb_args
- 1 ? "" : ", ");
144 return TRACE_TYPE_PARTIAL_LINE
;
147 ret
= trace_seq_putc(s
, ')');
149 return TRACE_TYPE_PARTIAL_LINE
;
152 ret
= trace_seq_putc(s
, '\n');
154 return TRACE_TYPE_PARTIAL_LINE
;
156 return TRACE_TYPE_HANDLED
;
160 print_syscall_exit(struct trace_iterator
*iter
, int flags
,
161 struct trace_event
*event
)
163 struct trace_seq
*s
= &iter
->seq
;
164 struct trace_entry
*ent
= iter
->ent
;
165 struct syscall_trace_exit
*trace
;
167 struct syscall_metadata
*entry
;
170 trace
= (typeof(trace
))ent
;
172 entry
= syscall_nr_to_meta(syscall
);
175 trace_seq_printf(s
, "\n");
176 return TRACE_TYPE_HANDLED
;
179 if (entry
->exit_event
->event
.type
!= ent
->type
) {
181 return TRACE_TYPE_UNHANDLED
;
184 ret
= trace_seq_printf(s
, "%s -> 0x%lx\n", entry
->name
,
187 return TRACE_TYPE_PARTIAL_LINE
;
189 return TRACE_TYPE_HANDLED
;
/* Never defined: referencing it breaks the link if a field size mismatches. */
extern char *__bad_type_size(void);

/*
 * Expand to the (type-name, field-name, offset, size, signedness)
 * argument pack trace_define_field() expects, with a link-time check
 * that the declared type really matches the struct member's size.
 */
#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)
201 int __set_enter_print_fmt(struct syscall_metadata
*entry
, char *buf
, int len
)
206 /* When len=0, we just calculate the needed length */
207 #define LEN_OR_ZERO (len ? len - pos : 0)
209 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
210 for (i
= 0; i
< entry
->nb_args
; i
++) {
211 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "%s: 0x%%0%zulx%s",
212 entry
->args
[i
], sizeof(unsigned long),
213 i
== entry
->nb_args
- 1 ? "" : ", ");
215 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
217 for (i
= 0; i
< entry
->nb_args
; i
++) {
218 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
,
219 ", ((unsigned long)(REC->%s))", entry
->args
[i
]);
224 /* return the length of print_fmt */
228 static int set_syscall_print_fmt(struct ftrace_event_call
*call
)
232 struct syscall_metadata
*entry
= call
->data
;
234 if (entry
->enter_event
!= call
) {
235 call
->print_fmt
= "\"0x%lx\", REC->ret";
239 /* First: called with 0 length to calculate the needed length */
240 len
= __set_enter_print_fmt(entry
, NULL
, 0);
242 print_fmt
= kmalloc(len
+ 1, GFP_KERNEL
);
246 /* Second: actually write the @print_fmt */
247 __set_enter_print_fmt(entry
, print_fmt
, len
+ 1);
248 call
->print_fmt
= print_fmt
;
253 static void free_syscall_print_fmt(struct ftrace_event_call
*call
)
255 struct syscall_metadata
*entry
= call
->data
;
257 if (entry
->enter_event
== call
)
258 kfree(call
->print_fmt
);
261 static int syscall_enter_define_fields(struct ftrace_event_call
*call
)
263 struct syscall_trace_enter trace
;
264 struct syscall_metadata
*meta
= call
->data
;
267 int offset
= offsetof(typeof(trace
), args
);
269 ret
= trace_define_field(call
, SYSCALL_FIELD(int, nr
), FILTER_OTHER
);
273 for (i
= 0; i
< meta
->nb_args
; i
++) {
274 ret
= trace_define_field(call
, meta
->types
[i
],
275 meta
->args
[i
], offset
,
276 sizeof(unsigned long), 0,
278 offset
+= sizeof(unsigned long);
284 static int syscall_exit_define_fields(struct ftrace_event_call
*call
)
286 struct syscall_trace_exit trace
;
289 ret
= trace_define_field(call
, SYSCALL_FIELD(int, nr
), FILTER_OTHER
);
293 ret
= trace_define_field(call
, SYSCALL_FIELD(long, ret
),
299 void ftrace_syscall_enter(void *ignore
, struct pt_regs
*regs
, long id
)
301 struct syscall_trace_enter
*entry
;
302 struct syscall_metadata
*sys_data
;
303 struct ring_buffer_event
*event
;
304 struct ring_buffer
*buffer
;
308 syscall_nr
= syscall_get_nr(current
, regs
);
311 if (!test_bit(syscall_nr
, enabled_enter_syscalls
))
314 sys_data
= syscall_nr_to_meta(syscall_nr
);
318 size
= sizeof(*entry
) + sizeof(unsigned long) * sys_data
->nb_args
;
320 event
= trace_current_buffer_lock_reserve(&buffer
,
321 sys_data
->enter_event
->event
.type
, size
, 0, 0);
325 entry
= ring_buffer_event_data(event
);
326 entry
->nr
= syscall_nr
;
327 syscall_get_arguments(current
, regs
, 0, sys_data
->nb_args
, entry
->args
);
329 if (!filter_current_check_discard(buffer
, sys_data
->enter_event
,
331 trace_current_buffer_unlock_commit(buffer
, event
, 0, 0);
334 void ftrace_syscall_exit(void *ignore
, struct pt_regs
*regs
, long ret
)
336 struct syscall_trace_exit
*entry
;
337 struct syscall_metadata
*sys_data
;
338 struct ring_buffer_event
*event
;
339 struct ring_buffer
*buffer
;
342 syscall_nr
= syscall_get_nr(current
, regs
);
345 if (!test_bit(syscall_nr
, enabled_exit_syscalls
))
348 sys_data
= syscall_nr_to_meta(syscall_nr
);
352 event
= trace_current_buffer_lock_reserve(&buffer
,
353 sys_data
->exit_event
->event
.type
, sizeof(*entry
), 0, 0);
357 entry
= ring_buffer_event_data(event
);
358 entry
->nr
= syscall_nr
;
359 entry
->ret
= syscall_get_return_value(current
, regs
);
361 if (!filter_current_check_discard(buffer
, sys_data
->exit_event
,
363 trace_current_buffer_unlock_commit(buffer
, event
, 0, 0);
366 int reg_event_syscall_enter(struct ftrace_event_call
*call
)
371 num
= ((struct syscall_metadata
*)call
->data
)->syscall_nr
;
372 if (WARN_ON_ONCE(num
< 0 || num
>= NR_syscalls
))
374 mutex_lock(&syscall_trace_lock
);
375 if (!sys_refcount_enter
)
376 ret
= register_trace_sys_enter(ftrace_syscall_enter
, NULL
);
378 set_bit(num
, enabled_enter_syscalls
);
379 sys_refcount_enter
++;
381 mutex_unlock(&syscall_trace_lock
);
385 void unreg_event_syscall_enter(struct ftrace_event_call
*call
)
389 num
= ((struct syscall_metadata
*)call
->data
)->syscall_nr
;
390 if (WARN_ON_ONCE(num
< 0 || num
>= NR_syscalls
))
392 mutex_lock(&syscall_trace_lock
);
393 sys_refcount_enter
--;
394 clear_bit(num
, enabled_enter_syscalls
);
395 if (!sys_refcount_enter
)
396 unregister_trace_sys_enter(ftrace_syscall_enter
, NULL
);
397 mutex_unlock(&syscall_trace_lock
);
400 int reg_event_syscall_exit(struct ftrace_event_call
*call
)
405 num
= ((struct syscall_metadata
*)call
->data
)->syscall_nr
;
406 if (WARN_ON_ONCE(num
< 0 || num
>= NR_syscalls
))
408 mutex_lock(&syscall_trace_lock
);
409 if (!sys_refcount_exit
)
410 ret
= register_trace_sys_exit(ftrace_syscall_exit
, NULL
);
412 set_bit(num
, enabled_exit_syscalls
);
415 mutex_unlock(&syscall_trace_lock
);
419 void unreg_event_syscall_exit(struct ftrace_event_call
*call
)
423 num
= ((struct syscall_metadata
*)call
->data
)->syscall_nr
;
424 if (WARN_ON_ONCE(num
< 0 || num
>= NR_syscalls
))
426 mutex_lock(&syscall_trace_lock
);
428 clear_bit(num
, enabled_exit_syscalls
);
429 if (!sys_refcount_exit
)
430 unregister_trace_sys_exit(ftrace_syscall_exit
, NULL
);
431 mutex_unlock(&syscall_trace_lock
);
434 int init_syscall_trace(struct ftrace_event_call
*call
)
439 num
= ((struct syscall_metadata
*)call
->data
)->syscall_nr
;
440 if (num
< 0 || num
>= NR_syscalls
) {
441 pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
442 ((struct syscall_metadata
*)call
->data
)->name
);
446 if (set_syscall_print_fmt(call
) < 0)
449 id
= trace_event_raw_init(call
);
452 free_syscall_print_fmt(call
);
459 unsigned long __init __weak
arch_syscall_addr(int nr
)
461 return (unsigned long)sys_call_table
[nr
];
464 int __init
init_ftrace_syscalls(void)
466 struct syscall_metadata
*meta
;
470 syscalls_metadata
= kzalloc(sizeof(*syscalls_metadata
) *
471 NR_syscalls
, GFP_KERNEL
);
472 if (!syscalls_metadata
) {
477 for (i
= 0; i
< NR_syscalls
; i
++) {
478 addr
= arch_syscall_addr(i
);
479 meta
= find_syscall_meta(addr
);
483 meta
->syscall_nr
= i
;
484 syscalls_metadata
[i
] = meta
;
489 core_initcall(init_ftrace_syscalls
);
491 #ifdef CONFIG_PERF_EVENTS
493 static DECLARE_BITMAP(enabled_perf_enter_syscalls
, NR_syscalls
);
494 static DECLARE_BITMAP(enabled_perf_exit_syscalls
, NR_syscalls
);
495 static int sys_perf_refcount_enter
;
496 static int sys_perf_refcount_exit
;
498 static void perf_syscall_enter(void *ignore
, struct pt_regs
*regs
, long id
)
500 struct syscall_metadata
*sys_data
;
501 struct syscall_trace_enter
*rec
;
502 struct hlist_head
*head
;
507 syscall_nr
= syscall_get_nr(current
, regs
);
508 if (!test_bit(syscall_nr
, enabled_perf_enter_syscalls
))
511 sys_data
= syscall_nr_to_meta(syscall_nr
);
515 /* get the size after alignment with the u32 buffer size field */
516 size
= sizeof(unsigned long) * sys_data
->nb_args
+ sizeof(*rec
);
517 size
= ALIGN(size
+ sizeof(u32
), sizeof(u64
));
520 if (WARN_ONCE(size
> PERF_MAX_TRACE_SIZE
,
521 "perf buffer not large enough"))
524 rec
= (struct syscall_trace_enter
*)perf_trace_buf_prepare(size
,
525 sys_data
->enter_event
->event
.type
, regs
, &rctx
);
529 rec
->nr
= syscall_nr
;
530 syscall_get_arguments(current
, regs
, 0, sys_data
->nb_args
,
531 (unsigned long *)&rec
->args
);
533 head
= this_cpu_ptr(sys_data
->enter_event
->perf_events
);
534 perf_trace_buf_submit(rec
, size
, rctx
, 0, 1, regs
, head
);
537 int perf_sysenter_enable(struct ftrace_event_call
*call
)
542 num
= ((struct syscall_metadata
*)call
->data
)->syscall_nr
;
544 mutex_lock(&syscall_trace_lock
);
545 if (!sys_perf_refcount_enter
)
546 ret
= register_trace_sys_enter(perf_syscall_enter
, NULL
);
548 pr_info("event trace: Could not activate"
549 "syscall entry trace point");
551 set_bit(num
, enabled_perf_enter_syscalls
);
552 sys_perf_refcount_enter
++;
554 mutex_unlock(&syscall_trace_lock
);
558 void perf_sysenter_disable(struct ftrace_event_call
*call
)
562 num
= ((struct syscall_metadata
*)call
->data
)->syscall_nr
;
564 mutex_lock(&syscall_trace_lock
);
565 sys_perf_refcount_enter
--;
566 clear_bit(num
, enabled_perf_enter_syscalls
);
567 if (!sys_perf_refcount_enter
)
568 unregister_trace_sys_enter(perf_syscall_enter
, NULL
);
569 mutex_unlock(&syscall_trace_lock
);
572 static void perf_syscall_exit(void *ignore
, struct pt_regs
*regs
, long ret
)
574 struct syscall_metadata
*sys_data
;
575 struct syscall_trace_exit
*rec
;
576 struct hlist_head
*head
;
581 syscall_nr
= syscall_get_nr(current
, regs
);
582 if (!test_bit(syscall_nr
, enabled_perf_exit_syscalls
))
585 sys_data
= syscall_nr_to_meta(syscall_nr
);
589 /* We can probably do that at build time */
590 size
= ALIGN(sizeof(*rec
) + sizeof(u32
), sizeof(u64
));
594 * Impossible, but be paranoid with the future
595 * How to put this check outside runtime?
597 if (WARN_ONCE(size
> PERF_MAX_TRACE_SIZE
,
598 "exit event has grown above perf buffer size"))
601 rec
= (struct syscall_trace_exit
*)perf_trace_buf_prepare(size
,
602 sys_data
->exit_event
->event
.type
, regs
, &rctx
);
606 rec
->nr
= syscall_nr
;
607 rec
->ret
= syscall_get_return_value(current
, regs
);
609 head
= this_cpu_ptr(sys_data
->exit_event
->perf_events
);
610 perf_trace_buf_submit(rec
, size
, rctx
, 0, 1, regs
, head
);
613 int perf_sysexit_enable(struct ftrace_event_call
*call
)
618 num
= ((struct syscall_metadata
*)call
->data
)->syscall_nr
;
620 mutex_lock(&syscall_trace_lock
);
621 if (!sys_perf_refcount_exit
)
622 ret
= register_trace_sys_exit(perf_syscall_exit
, NULL
);
624 pr_info("event trace: Could not activate"
625 "syscall exit trace point");
627 set_bit(num
, enabled_perf_exit_syscalls
);
628 sys_perf_refcount_exit
++;
630 mutex_unlock(&syscall_trace_lock
);
634 void perf_sysexit_disable(struct ftrace_event_call
*call
)
638 num
= ((struct syscall_metadata
*)call
->data
)->syscall_nr
;
640 mutex_lock(&syscall_trace_lock
);
641 sys_perf_refcount_exit
--;
642 clear_bit(num
, enabled_perf_exit_syscalls
);
643 if (!sys_perf_refcount_exit
)
644 unregister_trace_sys_exit(perf_syscall_exit
, NULL
);
645 mutex_unlock(&syscall_trace_lock
);
648 #endif /* CONFIG_PERF_EVENTS */
650 static int syscall_enter_register(struct ftrace_event_call
*event
,
654 case TRACE_REG_REGISTER
:
655 return reg_event_syscall_enter(event
);
656 case TRACE_REG_UNREGISTER
:
657 unreg_event_syscall_enter(event
);
660 #ifdef CONFIG_PERF_EVENTS
661 case TRACE_REG_PERF_REGISTER
:
662 return perf_sysenter_enable(event
);
663 case TRACE_REG_PERF_UNREGISTER
:
664 perf_sysenter_disable(event
);
671 static int syscall_exit_register(struct ftrace_event_call
*event
,
675 case TRACE_REG_REGISTER
:
676 return reg_event_syscall_exit(event
);
677 case TRACE_REG_UNREGISTER
:
678 unreg_event_syscall_exit(event
);
681 #ifdef CONFIG_PERF_EVENTS
682 case TRACE_REG_PERF_REGISTER
:
683 return perf_sysexit_enable(event
);
684 case TRACE_REG_PERF_UNREGISTER
:
685 perf_sysexit_disable(event
);