#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

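/*
 * State shared by the enter and exit tracepoint handlers below:
 * syscall_trace_lock serializes registration, the refcounts track how
 * many events currently use each tracepoint, and the bitmaps record
 * which individual syscalls have tracing enabled.
 */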
static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

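/*
 * find_syscall_meta() maps a syscall handler address back to its
 * compile-time metadata: it resolves the address to a symbol name via
 * kallsyms, then scans the __syscalls_metadata section that the
 * SYSCALL_DEFINE macros emit for a matching entry.
 */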
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		/*
		 * Only compare after the "sys" prefix. Archs that use
		 * syscall wrappers may have syscall symbol aliases prefixed
		 * with "SyS" instead of "sys", leading to an unwanted
		 * mismatch.
		 */
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

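/*
 * Output callbacks used by the trace iterator to render raw syscall
 * records as "name(arg: value, ...)" and "name -> 0x<ret>" lines.
 * A failed trace_seq write means the output buffer is full, which is
 * reported as TRACE_TYPE_PARTIAL_LINE.
 */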
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

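/*
 * SYSCALL_FIELD expands to the (type, name, offset, size, signedness)
 * argument tuple expected by the format and define_fields helpers
 * below.  __bad_type_size() is deliberately never defined: if the
 * declared type does not match the size of the struct member, the
 * call survives to link time and the build fails.
 */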
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

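/*
 * Emit the contents of the event's "format" file: one field
 * description per line, followed by the print fmt string that
 * userspace parsers consume.  Note that every syscall argument is
 * stored as an unsigned long, whatever its declared type.
 */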
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int i;
	int ret;
	struct syscall_metadata *entry = call->data;
	struct syscall_trace_enter trace;
	int offset = offsetof(struct syscall_trace_enter, args);

	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr));
	if (!ret)
		return 0;

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
				       entry->args[i]);
		if (!ret)
			return 0;
		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
				       "\tsigned:%u;\n", offset,
				       sizeof(unsigned long),
				       is_signed_type(unsigned long));
		if (!ret)
			return 0;
		offset += sizeof(unsigned long);
	}

	trace_seq_puts(s, "\nprint fmt: \"");
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
				       sizeof(unsigned long),
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return 0;
	}
	trace_seq_putc(s, '"');

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
				       entry->args[i]);
		if (!ret)
			return 0;
	}

	return trace_seq_putc(s, '\n');
}

int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int ret;
	struct syscall_trace_exit trace;

	ret = trace_seq_printf(s,
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n"
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr),
			       SYSCALL_FIELD(long, ret));
	if (!ret)
		return 0;

	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}

int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

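/*
 * Tracepoint probes, called on every syscall entry/exit once
 * registered.  They bail out early unless tracing is enabled for the
 * current syscall, then reserve a ring buffer event, fill in the
 * record and commit it, unless the event filter discards it first.
 */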
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->id, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->id, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

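/*
 * Registration callbacks invoked when a syscall event is enabled or
 * disabled.  The first user registers the shared tracepoint probe and
 * the last one unregisters it; the per-syscall bitmaps track the
 * events in between, all under syscall_trace_lock.
 */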
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(call->event);
	if (!id)
		return -ENODEV;
	call->id = id;
	INIT_LIST_HEAD(&call->fields);
	return 0;
}

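/*
 * Boot-time setup: allocate the NR_syscalls-sized lookup table and
 * populate it by resolving each syscall table entry to its metadata,
 * turning syscall_nr_to_meta() into a simple array lookup.
 */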
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;

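/*
 * perf (profile) probes.  Instead of the ftrace ring buffer, records
 * are assembled in the per-cpu perf_trace_buf scratch buffer and
 * handed to perf via perf_tp_event().  Interrupts are disabled to pin
 * the cpu and to act as the RCU read side protecting the buffer, and
 * the recursion context guards against the probe nesting on itself.
 */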
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	char *trace_buf;
	char *raw_data;
	int syscall_nr;
	int rctx;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	cpu = smp_processor_id();

	trace_buf = rcu_dereference(perf_trace_buf);
	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_enter *) raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->enter_event->id;
	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);
	perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(flags);
}

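/*
 * Same first-user-registers/last-user-unregisters scheme as the
 * ftrace variants above, applied to the profile bitmaps/refcounts.
 */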
int prof_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_enter)
		ret = register_trace_sys_enter(prof_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_prof_enter_syscalls);
		sys_prof_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void prof_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_enter--;
	clear_bit(num, enabled_prof_enter_syscalls);
	if (!sys_prof_refcount_enter)
		unregister_trace_sys_enter(prof_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	unsigned long flags;
	int syscall_nr;
	char *trace_buf;
	char *raw_data;
	int rctx;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We could probably compute this at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid about the future.
	 * How could this check be moved out of the runtime path?
	 */
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "exit event has grown above profile buffer size"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	cpu = smp_processor_id();

	trace_buf = rcu_dereference(perf_trace_buf);
	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_exit *)raw_data;

	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->exit_event->id;
	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(flags);
}

int prof_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_exit)
		ret = register_trace_sys_exit(prof_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_prof_exit_syscalls);
		sys_prof_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void prof_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_exit--;
	clear_bit(num, enabled_prof_exit_syscalls);
	if (!sys_prof_refcount_exit)
		unregister_trace_sys_exit(prof_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_EVENT_PROFILE */