#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
                                  enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
                                 enum trace_reg type);

static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);

/* All syscall exit events have the same fields */
static LIST_HEAD(syscall_exit_fields);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        return &entry->enter_fields;
}

static struct list_head *
syscall_get_exit_fields(struct ftrace_event_call *call)
{
        return &syscall_exit_fields;
}

struct trace_event_functions enter_syscall_print_funcs = {
        .trace          = print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
        .trace          = print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
        .system         = "syscalls",
        .reg            = syscall_enter_register,
        .define_fields  = syscall_enter_define_fields,
        .get_fields     = syscall_get_enter_fields,
        .raw_init       = init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
        .system         = "syscalls",
        .reg            = syscall_exit_register,
        .define_fields  = syscall_exit_define_fields,
        .get_fields     = syscall_get_exit_fields,
        .raw_init       = init_syscall_trace,
};

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

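/*
 * Walk the compiled-in syscall metadata section and return the entry
 * whose name matches the symbol that kallsyms resolves for @syscall.
 */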
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata *start;
        struct syscall_metadata *stop;
        char str[KSYM_SYMBOL_LEN];

        start = (struct syscall_metadata *)__start_syscalls_metadata;
        stop = (struct syscall_metadata *)__stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);

        for ( ; start < stop; start++) {
                /*
                 * Only compare after the "sys" prefix. Archs that use
                 * syscall wrappers may have syscalls symbols aliases prefixed
                 * with "SyS" instead of "sys", leading to an unwanted
                 * mismatch.
                 */
                if (start->name && !strcmp(start->name + 3, str + 3))
                        return start;
        }
        return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}

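/*
 * Render a syscall entry record as "name(arg: value, ...)", e.g.
 * "sys_read(fd: 3, buf: 7fff8000, count: 400)" (values illustrative).
 * With the verbose trace flag set, each value is preceded by its C type.
 */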
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
                    struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, ret, syscall;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry)
                goto end;

        if (entry->enter_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                goto end;
        }

        ret = trace_seq_printf(s, "%s(", entry->name);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        for (i = 0; i < entry->nb_args; i++) {
                /* parameter types */
                if (trace_flags & TRACE_ITER_VERBOSE) {
                        ret = trace_seq_printf(s, "%s ", entry->types[i]);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                /* parameter values */
                ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
                                       trace->args[i],
                                       i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_putc(s, ')');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

end:
        ret = trace_seq_putc(s, '\n');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;
        int ret;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry) {
                trace_seq_printf(s, "\n");
                return TRACE_TYPE_HANDLED;
        }

        if (entry->exit_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }

        ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                               trace->ret);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

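/*
 * __bad_type_size() is never defined; referencing it turns a field/type
 * size mismatch in SYSCALL_FIELD() into a link-time error.
 */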
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)                                       \
        sizeof(type) != sizeof(trace.name) ?                            \
                __bad_type_size() :                                     \
                #type, #name, offsetof(typeof(trace), name),            \
                sizeof(trace.name), is_signed_type(type)

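/*
 * Build the print_fmt string for a syscall entry event. Called twice:
 * once with a zero length so LEN_OR_ZERO evaluates to 0 and snprintf
 * only counts the needed space, then again to actually fill the buffer.
 */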
static int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
        int i;
        int pos = 0;

        /* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
                                entry->args[i], sizeof(unsigned long),
                                i == entry->nb_args - 1 ? "" : ", ");
        }
        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO,
                                ", ((unsigned long)(REC->%s))", entry->args[i]);
        }

#undef LEN_OR_ZERO

        /* return the length of print_fmt */
        return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
        char *print_fmt;
        int len;
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event != call) {
                call->print_fmt = "\"0x%lx\", REC->ret";
                return 0;
        }

        /* First: called with 0 length to calculate the needed length */
        len = __set_enter_print_fmt(entry, NULL, 0);

        print_fmt = kmalloc(len + 1, GFP_KERNEL);
        if (!print_fmt)
                return -ENOMEM;

        /* Second: actually write the @print_fmt */
        __set_enter_print_fmt(entry, print_fmt, len + 1);
        call->print_fmt = print_fmt;

        return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        /*
         * Exit events point at a static format string; only enter
         * events own a kmalloc'ed print_fmt.
         */
        if (entry->enter_event == call)
                kfree(call->print_fmt);
}

static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_enter trace;
        struct syscall_metadata *meta = call->data;
        int ret;
        int i;
        int offset = offsetof(typeof(trace), args);

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        for (i = 0; i < meta->nb_args; i++) {
                ret = trace_define_field(call, meta->types[i],
                                         meta->args[i], offset,
                                         sizeof(unsigned long), 0,
                                         FILTER_OTHER);
                offset += sizeof(unsigned long);
        }

        return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_exit trace;
        int ret;

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
                                 FILTER_OTHER);

        return ret;
}

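/*
 * Tracepoint probe for sys_enter: reserve space in the current trace
 * ring buffer, record the syscall number and arguments, and commit the
 * event unless the event filter discards it.
 */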
void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        event = trace_current_buffer_lock_reserve(&buffer,
                        sys_data->enter_event->event.type, size, 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

        if (!filter_current_check_discard(buffer, sys_data->enter_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        event = trace_current_buffer_lock_reserve(&buffer,
                        sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        if (!filter_current_check_discard(buffer, sys_data->exit_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

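/*
 * The reg/unreg helpers below share one tracepoint probe per direction:
 * the first enabled syscall event registers the probe, a per-syscall
 * bitmap gates individual events, and the last disable unregisters it.
 * Refcount and bitmap updates are serialized by syscall_trace_lock.
 */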
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
        if (!ret) {
                set_bit(num, enabled_enter_syscalls);
                sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_enter--;
        clear_bit(num, enabled_enter_syscalls);
        if (!sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
        mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
        if (!ret) {
                set_bit(num, enabled_exit_syscalls);
                sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_exit--;
        clear_bit(num, enabled_exit_syscalls);
        if (!sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
        mutex_unlock(&syscall_trace_lock);
}

int init_syscall_trace(struct ftrace_event_call *call)
{
        int id;

        if (set_syscall_print_fmt(call) < 0)
                return -ENOMEM;

        id = trace_event_raw_init(call);
        if (id < 0) {
                free_syscall_print_fmt(call);
                return id;
        }

        return id;
}

unsigned long __init arch_syscall_addr(int nr)
{
        return (unsigned long)sys_call_table[nr];
}

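/*
 * Build the syscall-number -> metadata lookup table at boot by walking
 * the architecture's sys_call_table and matching each handler address
 * against the compiled-in metadata section.
 */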
int __init init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        unsigned long addr;
        int i;

        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
                                        NR_syscalls, GFP_KERNEL);
        if (!syscalls_metadata) {
                WARN_ON(1);
                return -ENOMEM;
        }

        for (i = 0; i < NR_syscalls; i++) {
                addr = arch_syscall_addr(i);
                meta = find_syscall_meta(addr);
                if (!meta)
                        continue;

                meta->syscall_nr = i;
                syscalls_metadata[i] = meta;
        }

        return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

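/*
 * Perf probe for sys_enter: the record size is padded so that, together
 * with the u32 size field perf prepends, it stays u64-aligned, then the
 * record is submitted to the per-cpu perf event list for this tracepoint.
 */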
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        struct hlist_head *head;
        int syscall_nr;
        int rctx;
        int size;

        syscall_nr = syscall_get_nr(current, regs);
        if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* get the size after alignment with the u32 buffer size field */
        size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "perf buffer not large enough"))
                return;

        rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
                        sys_data->enter_event->event.type, regs, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                              (unsigned long *)&rec->args);

        head = this_cpu_ptr(sys_data->enter_event->perf_events);
        perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysenter_enable(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_enter)
                ret = register_trace_sys_enter(perf_syscall_enter, NULL);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall entry trace point");
        } else {
                set_bit(num, enabled_perf_enter_syscalls);
                sys_perf_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void perf_sysenter_disable(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_enter--;
        clear_bit(num, enabled_perf_enter_syscalls);
        if (!sys_perf_refcount_enter)
                unregister_trace_sys_enter(perf_syscall_enter, NULL);
        mutex_unlock(&syscall_trace_lock);
}

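/*
 * Perf probe for sys_exit: same flow as perf_syscall_enter, but the
 * record is fixed-size (syscall number plus return value).
 */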
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
        struct hlist_head *head;
        int syscall_nr;
        int rctx;
        int size;

        syscall_nr = syscall_get_nr(current, regs);
        if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* We can probably do that at build time */
        size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        /*
         * Impossible, but be paranoid with the future
         * How to put this check outside runtime?
         */
        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "exit event has grown above perf buffer size"))
                return;

        rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
                        sys_data->exit_event->event.type, regs, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);

        head = this_cpu_ptr(sys_data->exit_event->perf_events);
        perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysexit_enable(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_exit)
                ret = register_trace_sys_exit(perf_syscall_exit, NULL);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall exit trace point");
        } else {
                set_bit(num, enabled_perf_exit_syscalls);
                sys_perf_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void perf_sysexit_disable(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_exit--;
        clear_bit(num, enabled_perf_exit_syscalls);
        if (!sys_perf_refcount_exit)
                unregister_trace_sys_exit(perf_syscall_exit, NULL);
        mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

static int syscall_enter_register(struct ftrace_event_call *event,
                                  enum trace_reg type)
{
        switch (type) {
        case TRACE_REG_REGISTER:
                return reg_event_syscall_enter(event);
        case TRACE_REG_UNREGISTER:
                unreg_event_syscall_enter(event);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return perf_sysenter_enable(event);
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysenter_disable(event);
                return 0;
#endif
        }
        return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
                                 enum trace_reg type)
{
        switch (type) {
        case TRACE_REG_REGISTER:
                return reg_event_syscall_exit(event);
        case TRACE_REG_UNREGISTER:
                unreg_event_syscall_exit(event);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return perf_sysexit_enable(event);
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysexit_disable(event);
                return 0;
#endif
        }
        return 0;
}