tracing: Pull up calls to trace_define_common_fields()
[linux-2.6/kvm.git] kernel/trace/trace_syscalls.c
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;
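
/*
 * Look up the syscall metadata entry for the syscall whose handler
 * lives at the given address: resolve the address to a symbol name
 * and match it against the entries in the syscalls metadata section.
 */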
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata *start;
        struct syscall_metadata *stop;
        char str[KSYM_SYMBOL_LEN];

        start = (struct syscall_metadata *)__start_syscalls_metadata;
        stop = (struct syscall_metadata *)__stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);

        for ( ; start < stop; start++) {
                /*
                 * Only compare after the "sys" prefix. Archs that use
                 * syscall wrappers may have syscall symbol aliases prefixed
                 * with "SyS" instead of "sys", leading to an unwanted
                 * mismatch.
                 */
                if (start->name && !strcmp(start->name + 3, str + 3))
                        return start;
        }
        return NULL;
}
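
/* Return the metadata entry for syscall number nr, or NULL if unknown. */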
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}
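
/*
 * Output one sys_enter event as "name(arg: value, ...)"; with the
 * verbose trace flag set, each argument is also prefixed with its type.
 */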
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, ret, syscall;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry)
                goto end;

        if (entry->enter_event->id != ent->type) {
                WARN_ON_ONCE(1);
                goto end;
        }

        ret = trace_seq_printf(s, "%s(", entry->name);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        for (i = 0; i < entry->nb_args; i++) {
                /* parameter types */
                if (trace_flags & TRACE_ITER_VERBOSE) {
                        ret = trace_seq_printf(s, "%s ", entry->types[i]);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                /* parameter values */
                ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
                                       trace->args[i],
                                       i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_putc(s, ')');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

end:
        ret = trace_seq_putc(s, '\n');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
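
/* Output one sys_exit event as "name -> return value". */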
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;
        int ret;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry) {
                trace_seq_printf(s, "\n");
                return TRACE_TYPE_HANDLED;
        }

        if (entry->exit_event->id != ent->type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }

        ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                               trace->ret);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
extern char *__bad_type_size(void);
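
/*
 * SYSCALL_FIELD expands to the "type string, name string, offset, size,
 * signedness" argument run consumed by the format strings and
 * trace_define_field() calls below.  The sizeof() comparison forces a
 * link error through the undefined __bad_type_size() if the declared
 * type does not match the corresponding field of the trace record.
 */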
#define SYSCALL_FIELD(type, name)                                       \
        sizeof(type) != sizeof(trace.name) ?                            \
                __bad_type_size() :                                     \
                #type, #name, offsetof(typeof(trace), name),            \
                sizeof(trace.name), is_signed_type(type)
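
/*
 * Print the event format description for a syscall entry event: the
 * common nr field, one unsigned-long-sized field per argument, and the
 * "print fmt" line consumed by userspace parsers.
 */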
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
        int i;
        int ret;
        struct syscall_metadata *entry = call->data;
        struct syscall_trace_enter trace;
        int offset = offsetof(struct syscall_trace_enter, args);

        ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n",
                               SYSCALL_FIELD(int, nr));
        if (!ret)
                return 0;

        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
                                       entry->args[i]);
                if (!ret)
                        return 0;
                ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
                                       "\tsigned:%u;\n", offset,
                                       sizeof(unsigned long),
                                       is_signed_type(unsigned long));
                if (!ret)
                        return 0;
                offset += sizeof(unsigned long);
        }

        trace_seq_puts(s, "\nprint fmt: \"");
        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
                                       sizeof(unsigned long),
                                       i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return 0;
        }
        trace_seq_putc(s, '"');

        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
                                       entry->args[i]);
                if (!ret)
                        return 0;
        }

        return trace_seq_putc(s, '\n');
}
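
/* Print the event format description for a syscall exit event. */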
int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
        int ret;
        struct syscall_trace_exit trace;

        ret = trace_seq_printf(s,
                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n"
                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n",
                               SYSCALL_FIELD(int, nr),
                               SYSCALL_FIELD(long, ret));
        if (!ret)
                return 0;

        return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}
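
/*
 * Register the filterable fields of a syscall entry event: the syscall
 * number plus one unsigned-long slot per argument.
 */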
int syscall_enter_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_enter trace;
        struct syscall_metadata *meta = call->data;
        int ret;
        int i;
        int offset = offsetof(typeof(trace), args);

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        for (i = 0; i < meta->nb_args; i++) {
                ret = trace_define_field(call, meta->types[i],
                                         meta->args[i], offset,
                                         sizeof(unsigned long), 0,
                                         FILTER_OTHER);
                offset += sizeof(unsigned long);
        }

        return ret;
}
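
/* Register the filterable fields of a syscall exit event: nr and ret. */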
int syscall_exit_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_exit trace;
        int ret;

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
                                 FILTER_OTHER);

        return ret;
}
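
/*
 * Tracepoint probe for sys_enter: record the syscall number and its
 * arguments in the ftrace ring buffer, provided tracing is enabled for
 * this syscall and the event is not discarded by a filter.
 */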
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        event = trace_current_buffer_lock_reserve(&buffer,
                        sys_data->enter_event->id, size, 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

        if (!filter_current_check_discard(buffer, sys_data->enter_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
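
/*
 * Tracepoint probe for sys_exit: record the syscall number and return
 * value, symmetric to ftrace_syscall_enter() above.
 */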
void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        event = trace_current_buffer_lock_reserve(&buffer,
                        sys_data->exit_event->id, sizeof(*entry), 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        if (!filter_current_check_discard(buffer, sys_data->exit_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
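
/*
 * Enable entry tracing for one syscall.  The first enabled syscall
 * registers the shared sys_enter tracepoint probe; a refcount keeps it
 * registered until the last user is gone (see the unreg counterpart
 * below).  The exit-side pair works the same way.
 */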
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall entry trace point\n");
        } else {
                set_bit(num, enabled_enter_syscalls);
                sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}
void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_enter--;
        clear_bit(num, enabled_enter_syscalls);
        if (!sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
}
int reg_event_syscall_exit(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall exit trace point\n");
        } else {
                set_bit(num, enabled_exit_syscalls);
                sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}
void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls)
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_exit--;
        clear_bit(num, enabled_exit_syscalls);
        if (!sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
}
int init_syscall_trace(struct ftrace_event_call *call)
{
        int id;

        id = register_ftrace_event(call->event);
        if (!id)
                return -ENODEV;
        call->id = id;
        INIT_LIST_HEAD(&call->fields);
        return 0;
}
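
/*
 * Walk the syscall table at boot and map each syscall number to its
 * metadata entry, so that syscall_nr_to_meta() becomes a simple array
 * lookup.
 */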
int __init init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        unsigned long addr;
        int i;

        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
                                        NR_syscalls, GFP_KERNEL);
        if (!syscalls_metadata) {
                WARN_ON(1);
                return -ENOMEM;
        }

        for (i = 0; i < NR_syscalls; i++) {
                addr = arch_syscall_addr(i);
                meta = find_syscall_meta(addr);
                if (!meta)
                        continue;

                meta->syscall_nr = i;
                syscalls_metadata[i] = meta;
        }

        return 0;
}
core_initcall(init_ftrace_syscalls);
#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;
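
/*
 * Perf (profile) probe for sys_enter: build the syscall record in the
 * per-cpu perf trace buffer and submit it with perf_tp_event().
 */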
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        unsigned long flags;
        char *trace_buf;
        char *raw_data;
        int syscall_nr;
        int rctx;
        int size;
        int cpu;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* get the size after alignment with the u32 buffer size field */
        size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
                      "profile buffer not large enough"))
                return;

        /* Protect the per cpu buffer, begin the rcu read side */
        local_irq_save(flags);

        rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                goto end_recursion;

        cpu = smp_processor_id();

        trace_buf = rcu_dereference(perf_trace_buf);

        if (!trace_buf)
                goto end;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* zero the dead bytes from align to not leak stack to user */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        rec = (struct syscall_trace_enter *) raw_data;
        tracing_generic_entry_update(&rec->ent, 0, 0);
        rec->ent.type = sys_data->enter_event->id;
        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                               (unsigned long *)&rec->args);
        perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);

end:
        perf_swevent_put_recursion_context(rctx);
end_recursion:
        local_irq_restore(flags);
}
int prof_sysenter_enable(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_prof_refcount_enter)
                ret = register_trace_sys_enter(prof_syscall_enter);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall entry trace point\n");
        } else {
                set_bit(num, enabled_prof_enter_syscalls);
                sys_prof_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}
void prof_sysenter_disable(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_prof_refcount_enter--;
        clear_bit(num, enabled_prof_enter_syscalls);
        if (!sys_prof_refcount_enter)
                unregister_trace_sys_enter(prof_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
}
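
/*
 * Perf (profile) probe for sys_exit: same scheme as prof_syscall_enter(),
 * but the record has a fixed size (syscall number plus return value).
 */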
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
        unsigned long flags;
        int syscall_nr;
        char *trace_buf;
        char *raw_data;
        int rctx;
        int size;
        int cpu;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* We can probably do that at build time */
        size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        /*
         * Impossible, but be paranoid with the future
         * How to put this check outside runtime?
         */
        if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
                      "exit event has grown above profile buffer size"))
                return;

        /* Protect the per cpu buffer, begin the rcu read side */
        local_irq_save(flags);

        rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                goto end_recursion;

        cpu = smp_processor_id();

        trace_buf = rcu_dereference(perf_trace_buf);

        if (!trace_buf)
                goto end;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* zero the dead bytes from align to not leak stack to user */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        rec = (struct syscall_trace_exit *)raw_data;

        tracing_generic_entry_update(&rec->ent, 0, 0);
        rec->ent.type = sys_data->exit_event->id;
        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);

        perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);

end:
        perf_swevent_put_recursion_context(rctx);
end_recursion:
        local_irq_restore(flags);
}
int prof_sysexit_enable(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_prof_refcount_exit)
                ret = register_trace_sys_exit(prof_syscall_exit);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall exit trace point\n");
        } else {
                set_bit(num, enabled_prof_exit_syscalls);
                sys_prof_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}
void prof_sysexit_disable(struct ftrace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_prof_refcount_exit--;
        clear_bit(num, enabled_prof_exit_syscalls);
        if (!sys_prof_refcount_exit)
                unregister_trace_sys_exit(prof_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
}

#endif  /* CONFIG_EVENT_PROFILE */