1 #include <trace/syscall.h>
2 #include <linux/kernel.h>
3 #include <linux/ftrace.h>
4 #include <linux/perf_counter.h>
5 #include <asm/syscall.h>
7 #include "trace_output.h"
/* Serializes tracepoint (un)registration and the refcounts/bitmaps below. */
static DEFINE_MUTEX(syscall_trace_lock);

/* How many users currently have the enter/exit tracepoints registered. */
static int sys_refcount_enter;
static int sys_refcount_exit;

/* One bit per syscall number: is ftrace tracing enabled for that syscall? */
static DECLARE_BITMAP(enabled_enter_syscalls, FTRACE_SYSCALL_MAX);
static DECLARE_BITMAP(enabled_exit_syscalls, FTRACE_SYSCALL_MAX);
17 print_syscall_enter(struct trace_iterator
*iter
, int flags
)
19 struct trace_seq
*s
= &iter
->seq
;
20 struct trace_entry
*ent
= iter
->ent
;
21 struct syscall_trace_enter
*trace
;
22 struct syscall_metadata
*entry
;
25 trace
= (typeof(trace
))ent
;
27 entry
= syscall_nr_to_meta(syscall
);
32 if (entry
->enter_id
!= ent
->type
) {
37 ret
= trace_seq_printf(s
, "%s(", entry
->name
);
39 return TRACE_TYPE_PARTIAL_LINE
;
41 for (i
= 0; i
< entry
->nb_args
; i
++) {
43 if (trace_flags
& TRACE_ITER_VERBOSE
) {
44 ret
= trace_seq_printf(s
, "%s ", entry
->types
[i
]);
46 return TRACE_TYPE_PARTIAL_LINE
;
48 /* parameter values */
49 ret
= trace_seq_printf(s
, "%s: %lx%s ", entry
->args
[i
],
51 i
== entry
->nb_args
- 1 ? ")" : ",");
53 return TRACE_TYPE_PARTIAL_LINE
;
57 trace_seq_printf(s
, "\n");
58 return TRACE_TYPE_HANDLED
;
62 print_syscall_exit(struct trace_iterator
*iter
, int flags
)
64 struct trace_seq
*s
= &iter
->seq
;
65 struct trace_entry
*ent
= iter
->ent
;
66 struct syscall_trace_exit
*trace
;
68 struct syscall_metadata
*entry
;
71 trace
= (typeof(trace
))ent
;
73 entry
= syscall_nr_to_meta(syscall
);
76 trace_seq_printf(s
, "\n");
77 return TRACE_TYPE_HANDLED
;
80 if (entry
->exit_id
!= ent
->type
) {
82 return TRACE_TYPE_UNHANDLED
;
85 ret
= trace_seq_printf(s
, "%s -> 0x%lx\n", entry
->name
,
88 return TRACE_TYPE_PARTIAL_LINE
;
90 return TRACE_TYPE_HANDLED
;
/* Deliberately undefined: referencing it breaks the link when the field's
 * declared type does not match its size in the trace record. */
extern char *__bad_type_size(void);

/*
 * Expands to the printf arguments describing one fixed field of a syscall
 * trace record: type name, field name, offset and size.  The ternary is a
 * compile/link-time size check; the middle branch is never evaluated at
 * runtime.  Relies on a local variable named 'trace' of the record type.
 */
#define SYSCALL_FIELD(type, name)		    \
	sizeof(type) != sizeof(trace.name) ?	    \
		__bad_type_size() :		    \
		#type, #name, offsetof(typeof(trace), name), sizeof(trace.name)
100 int syscall_enter_format(struct ftrace_event_call
*call
, struct trace_seq
*s
)
105 struct syscall_metadata
*entry
;
106 struct syscall_trace_enter trace
;
107 int offset
= offsetof(struct syscall_trace_enter
, args
);
109 nr
= syscall_name_to_nr(call
->data
);
110 entry
= syscall_nr_to_meta(nr
);
115 ret
= trace_seq_printf(s
, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
116 SYSCALL_FIELD(int, nr
));
120 for (i
= 0; i
< entry
->nb_args
; i
++) {
121 ret
= trace_seq_printf(s
, "\tfield:%s %s;", entry
->types
[i
],
125 ret
= trace_seq_printf(s
, "\toffset:%d;\tsize:%zu;\n", offset
,
126 sizeof(unsigned long));
129 offset
+= sizeof(unsigned long);
132 trace_seq_printf(s
, "\nprint fmt: \"");
133 for (i
= 0; i
< entry
->nb_args
; i
++) {
134 ret
= trace_seq_printf(s
, "%s: 0x%%0%zulx%s", entry
->args
[i
],
135 sizeof(unsigned long),
136 i
== entry
->nb_args
- 1 ? "\", " : ", ");
141 for (i
= 0; i
< entry
->nb_args
; i
++) {
142 ret
= trace_seq_printf(s
, "((unsigned long)(REC->%s))%s",
144 i
== entry
->nb_args
- 1 ? "\n" : ", ");
152 int syscall_exit_format(struct ftrace_event_call
*call
, struct trace_seq
*s
)
155 struct syscall_trace_exit trace
;
157 ret
= trace_seq_printf(s
,
158 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
159 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
160 SYSCALL_FIELD(int, nr
),
161 SYSCALL_FIELD(unsigned long, ret
));
165 return trace_seq_printf(s
, "\nprint fmt: \"0x%%lx\", REC->ret\n");
168 void ftrace_syscall_enter(struct pt_regs
*regs
, long id
)
170 struct syscall_trace_enter
*entry
;
171 struct syscall_metadata
*sys_data
;
172 struct ring_buffer_event
*event
;
176 syscall_nr
= syscall_get_nr(current
, regs
);
177 if (!test_bit(syscall_nr
, enabled_enter_syscalls
))
180 sys_data
= syscall_nr_to_meta(syscall_nr
);
184 size
= sizeof(*entry
) + sizeof(unsigned long) * sys_data
->nb_args
;
186 event
= trace_current_buffer_lock_reserve(sys_data
->enter_id
, size
,
191 entry
= ring_buffer_event_data(event
);
192 entry
->nr
= syscall_nr
;
193 syscall_get_arguments(current
, regs
, 0, sys_data
->nb_args
, entry
->args
);
195 trace_current_buffer_unlock_commit(event
, 0, 0);
199 void ftrace_syscall_exit(struct pt_regs
*regs
, long ret
)
201 struct syscall_trace_exit
*entry
;
202 struct syscall_metadata
*sys_data
;
203 struct ring_buffer_event
*event
;
206 syscall_nr
= syscall_get_nr(current
, regs
);
207 if (!test_bit(syscall_nr
, enabled_exit_syscalls
))
210 sys_data
= syscall_nr_to_meta(syscall_nr
);
214 event
= trace_current_buffer_lock_reserve(sys_data
->exit_id
,
215 sizeof(*entry
), 0, 0);
219 entry
= ring_buffer_event_data(event
);
220 entry
->nr
= syscall_nr
;
221 entry
->ret
= syscall_get_return_value(current
, regs
);
223 trace_current_buffer_unlock_commit(event
, 0, 0);
227 int reg_event_syscall_enter(void *ptr
)
234 num
= syscall_name_to_nr(name
);
235 if (num
< 0 || num
>= FTRACE_SYSCALL_MAX
)
237 mutex_lock(&syscall_trace_lock
);
238 if (!sys_refcount_enter
)
239 ret
= register_trace_syscall_enter(ftrace_syscall_enter
);
241 pr_info("event trace: Could not activate"
242 "syscall entry trace point");
244 set_bit(num
, enabled_enter_syscalls
);
245 sys_refcount_enter
++;
247 mutex_unlock(&syscall_trace_lock
);
251 void unreg_event_syscall_enter(void *ptr
)
257 num
= syscall_name_to_nr(name
);
258 if (num
< 0 || num
>= FTRACE_SYSCALL_MAX
)
260 mutex_lock(&syscall_trace_lock
);
261 sys_refcount_enter
--;
262 clear_bit(num
, enabled_enter_syscalls
);
263 if (!sys_refcount_enter
)
264 unregister_trace_syscall_enter(ftrace_syscall_enter
);
265 mutex_unlock(&syscall_trace_lock
);
268 int reg_event_syscall_exit(void *ptr
)
275 num
= syscall_name_to_nr(name
);
276 if (num
< 0 || num
>= FTRACE_SYSCALL_MAX
)
278 mutex_lock(&syscall_trace_lock
);
279 if (!sys_refcount_exit
)
280 ret
= register_trace_syscall_exit(ftrace_syscall_exit
);
282 pr_info("event trace: Could not activate"
283 "syscall exit trace point");
285 set_bit(num
, enabled_exit_syscalls
);
288 mutex_unlock(&syscall_trace_lock
);
292 void unreg_event_syscall_exit(void *ptr
)
298 num
= syscall_name_to_nr(name
);
299 if (num
< 0 || num
>= FTRACE_SYSCALL_MAX
)
301 mutex_lock(&syscall_trace_lock
);
303 clear_bit(num
, enabled_exit_syscalls
);
304 if (!sys_refcount_exit
)
305 unregister_trace_syscall_exit(ftrace_syscall_exit
);
306 mutex_unlock(&syscall_trace_lock
);
309 struct trace_event event_syscall_enter
= {
310 .trace
= print_syscall_enter
,
313 struct trace_event event_syscall_exit
= {
314 .trace
= print_syscall_exit
,
317 #ifdef CONFIG_EVENT_PROFILE
/* One bit per syscall number: is perf profiling enabled for that syscall? */
static DECLARE_BITMAP(enabled_prof_enter_syscalls, FTRACE_SYSCALL_MAX);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, FTRACE_SYSCALL_MAX);

/* How many users currently have the profiling probes registered. */
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;
324 static void prof_syscall_enter(struct pt_regs
*regs
, long id
)
326 struct syscall_trace_enter
*rec
;
327 struct syscall_metadata
*sys_data
;
331 syscall_nr
= syscall_get_nr(current
, regs
);
332 if (!test_bit(syscall_nr
, enabled_prof_enter_syscalls
))
335 sys_data
= syscall_nr_to_meta(syscall_nr
);
339 /* get the size after alignment with the u32 buffer size field */
340 size
= sizeof(unsigned long) * sys_data
->nb_args
+ sizeof(*rec
);
341 size
= ALIGN(size
+ sizeof(u32
), sizeof(u64
));
347 /* zero the dead bytes from align to not leak stack to user */
348 *(u64
*)(&raw_data
[size
- sizeof(u64
)]) = 0ULL;
350 rec
= (struct syscall_trace_enter
*) raw_data
;
351 tracing_generic_entry_update(&rec
->ent
, 0, 0);
352 rec
->ent
.type
= sys_data
->enter_id
;
353 rec
->nr
= syscall_nr
;
354 syscall_get_arguments(current
, regs
, 0, sys_data
->nb_args
,
355 (unsigned long *)&rec
->args
);
356 perf_tpcounter_event(sys_data
->enter_id
, 0, 1, rec
, size
);
360 int reg_prof_syscall_enter(char *name
)
365 num
= syscall_name_to_nr(name
);
366 if (num
< 0 || num
>= FTRACE_SYSCALL_MAX
)
369 mutex_lock(&syscall_trace_lock
);
370 if (!sys_prof_refcount_enter
)
371 ret
= register_trace_syscall_enter(prof_syscall_enter
);
373 pr_info("event trace: Could not activate"
374 "syscall entry trace point");
376 set_bit(num
, enabled_prof_enter_syscalls
);
377 sys_prof_refcount_enter
++;
379 mutex_unlock(&syscall_trace_lock
);
383 void unreg_prof_syscall_enter(char *name
)
387 num
= syscall_name_to_nr(name
);
388 if (num
< 0 || num
>= FTRACE_SYSCALL_MAX
)
391 mutex_lock(&syscall_trace_lock
);
392 sys_prof_refcount_enter
--;
393 clear_bit(num
, enabled_prof_enter_syscalls
);
394 if (!sys_prof_refcount_enter
)
395 unregister_trace_syscall_enter(prof_syscall_enter
);
396 mutex_unlock(&syscall_trace_lock
);
399 static void prof_syscall_exit(struct pt_regs
*regs
, long ret
)
401 struct syscall_metadata
*sys_data
;
402 struct syscall_trace_exit rec
;
405 syscall_nr
= syscall_get_nr(current
, regs
);
406 if (!test_bit(syscall_nr
, enabled_prof_exit_syscalls
))
409 sys_data
= syscall_nr_to_meta(syscall_nr
);
413 tracing_generic_entry_update(&rec
.ent
, 0, 0);
414 rec
.ent
.type
= sys_data
->exit_id
;
416 rec
.ret
= syscall_get_return_value(current
, regs
);
418 perf_tpcounter_event(sys_data
->exit_id
, 0, 1, &rec
, sizeof(rec
));
421 int reg_prof_syscall_exit(char *name
)
426 num
= syscall_name_to_nr(name
);
427 if (num
< 0 || num
>= FTRACE_SYSCALL_MAX
)
430 mutex_lock(&syscall_trace_lock
);
431 if (!sys_prof_refcount_exit
)
432 ret
= register_trace_syscall_exit(prof_syscall_exit
);
434 pr_info("event trace: Could not activate"
435 "syscall entry trace point");
437 set_bit(num
, enabled_prof_exit_syscalls
);
438 sys_prof_refcount_exit
++;
440 mutex_unlock(&syscall_trace_lock
);
444 void unreg_prof_syscall_exit(char *name
)
448 num
= syscall_name_to_nr(name
);
449 if (num
< 0 || num
>= FTRACE_SYSCALL_MAX
)
452 mutex_lock(&syscall_trace_lock
);
453 sys_prof_refcount_exit
--;
454 clear_bit(num
, enabled_prof_exit_syscalls
);
455 if (!sys_prof_refcount_exit
)
456 unregister_trace_syscall_exit(prof_syscall_exit
);
457 mutex_unlock(&syscall_trace_lock
);