/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>

#include "trace.h"
#include "trace_output.h"
#define MAX_TRACE_ARGS		128
#define MAX_ARGSTR_LEN		63
#define MAX_EVENT_NAME_LEN	64
#define KPROBE_EVENT_SYSTEM	"kprobes"

/* Reserved field names */
#define FIELD_STRING_IP		"__probe_ip"
#define FIELD_STRING_NARGS	"__probe_nargs"
#define FIELD_STRING_RETIP	"__probe_ret_ip"
#define FIELD_STRING_FUNC	"__probe_func"
const char *reserved_field_names[] = {
	"common_type",
	"common_flags",
	"common_preempt_count",
	"common_pid",
	"common_tgid",
	"common_lock_depth",
	FIELD_STRING_IP,
	FIELD_STRING_NARGS,
	FIELD_STRING_RETIP,
	FIELD_STRING_FUNC,
};
struct fetch_func {
	unsigned long (*func)(struct pt_regs *, void *);
	void *data;
};

static __kprobes unsigned long call_fetch(struct fetch_func *f,
					  struct pt_regs *regs)
{
	return f->func(regs, f->data);
}
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
					      void *offset)
{
	return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}
static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
					   void *num)
{
	return regs_get_kernel_stack_nth(regs,
					 (unsigned int)((unsigned long)num));
}
static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
{
	unsigned long retval;

	if (probe_kernel_address(addr, retval))
		return 0;
	return retval;
}
static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}
static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
					      void *dummy)
{
	return regs_return_value(regs);
}
static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
						   void *dummy)
{
	return kernel_stack_pointer(regs);
}
/* Memory fetching by symbol */
struct symbol_cache {
	char *symbol;
	long offset;
	unsigned long addr;
};

static unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
	if (sc->addr)
		sc->addr += sc->offset;
	return sc->addr;
}
static void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}
static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;
	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;

	update_symbol_cache(sc);
	return sc;
}
static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
{
	struct symbol_cache *sc = data;

	if (sc->addr)
		return fetch_memory(regs, (void *)sc->addr);
	else
		return 0;
}
/* Special indirect memory access interface */
struct indirect_fetch_data {
	struct fetch_func orig;
	long offset;
};

static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
{
	struct indirect_fetch_data *ind = data;
	unsigned long addr;

	addr = call_fetch(&ind->orig, regs);
	addr += ind->offset;
	return fetch_memory(regs, (void *)addr);
}
static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
	if (data->orig.func == fetch_indirect)
		free_indirect_fetch_data(data->orig.data);
	else if (data->orig.func == fetch_symbol)
		free_symbol_cache(data->orig.data);
	kfree(data);
}
/**
 * Kprobe event core functions
 */

struct probe_arg {
	struct fetch_func	fetch;
	const char		*name;
};

/* Flags for trace_probe */
#define TP_FLAG_TRACE	1
#define TP_FLAG_PROFILE	2

struct trace_probe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_call	call;
	struct trace_event	event;
	unsigned int		nr_args;
	struct probe_arg	args[];
};

#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
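/*
 * For example, SIZEOF_TRACE_PROBE(2) is the allocation size of a trace_probe
 * whose flexible args[] array holds two probe_arg entries.
 */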
static __kprobes int probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}
static __kprobes const char *probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}
static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
	int ret = -EINVAL;

	if (ff->func == fetch_argument)
		ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_register) {
		const char *name;

		name = regs_query_register_name((unsigned int)((long)ff->data));
		ret = snprintf(buf, n, "%%%s", name);
	} else if (ff->func == fetch_stack)
		ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_memory)
		ret = snprintf(buf, n, "@0x%p", ff->data);
	else if (ff->func == fetch_symbol) {
		struct symbol_cache *sc = ff->data;

		if (sc->offset)
			ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
				       sc->offset);
		else
			ret = snprintf(buf, n, "@%s", sc->symbol);
	} else if (ff->func == fetch_retvalue)
		ret = snprintf(buf, n, "$retval");
	else if (ff->func == fetch_stack_address)
		ret = snprintf(buf, n, "$stack");
	else if (ff->func == fetch_indirect) {
		struct indirect_fetch_data *id = ff->data;
		size_t l = 0;

		ret = snprintf(buf, n, "%+ld(", id->offset);
		if (ret >= n)
			goto end;
		l += ret;
		ret = probe_arg_string(buf + l, n - l, &id->orig);
		if (ret < 0)
			goto end;
		l += ret;
		ret = snprintf(buf + l, n - l, ")");
		ret += l;
	}
end:
	if (ret >= n)
		return -ENOSPC;
	return ret;
}
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, int is_return)
{
	struct trace_probe *tp;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event)
		goto error;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group)
		goto error;
	tp->call.system = kstrdup(group, GFP_KERNEL);
	if (!tp->call.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(-ENOMEM);
}
static void free_probe_arg(struct probe_arg *arg)
{
	if (arg->fetch.func == fetch_symbol)
		free_symbol_cache(arg->fetch.data);
	else if (arg->fetch.func == fetch_indirect)
		free_indirect_fetch_data(arg->fetch.data);
	kfree(arg->name);
}
static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		free_probe_arg(&tp->args[i]);

	kfree(tp->call.system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}
static struct trace_probe *find_probe_event(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.system, group) == 0)
			return tp;
	return NULL;
}
/* Unregister a trace_probe and probe_event: must be called with probe_lock held */
static void unregister_trace_probe(struct trace_probe *tp)
{
	if (probe_is_return(tp))
		unregister_kretprobe(&tp->rp);
	else
		unregister_kprobe(&tp->rp.kp);
	list_del(&tp->list);
	unregister_probe_event(tp);
}
/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* register as an event */
	old_tp = find_probe_event(tp->call.name, tp->call.system);
	if (old_tp) {
		/* delete old event */
		unregister_trace_probe(old_tp);
		free_trace_probe(old_tp);
	}
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
	if (probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret) {
		pr_warning("Could not insert probe(%d)\n", ret);
		if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
		unregister_probe_event(tp);
	} else
		list_add_tail(&tp->list, &probe_list);
end:
	mutex_unlock(&probe_lock);
	return ret;
}
/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *tmp;
	int ret;

	if (!offset)
		return -EINVAL;

	tmp = strchr(symbol, '+');
	if (tmp) {
		/* skip sign because strict_strtol doesn't accept '+' */
		ret = strict_strtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;
		*tmp = '\0';
	} else
		*offset = 0;

	return 0;
}
#define PARAM_MAX_ARGS 16
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;

	if (strcmp(arg, "retval") == 0) {
		if (is_return) {
			ff->func = fetch_retvalue;
			ff->data = NULL;
		} else
			ret = -EINVAL;
	} else if (strncmp(arg, "stack", 5) == 0) {
		if (arg[5] == '\0') {
			ff->func = fetch_stack_address;
			ff->data = NULL;
		} else if (isdigit(arg[5])) {
			ret = strict_strtoul(arg + 5, 10, &param);
			if (ret || param > PARAM_MAX_STACK)
				ret = -EINVAL;
			else {
				ff->func = fetch_stack;
				ff->data = (void *)param;
			}
		} else
			ret = -EINVAL;
	} else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
		ret = strict_strtoul(arg + 3, 10, &param);
		if (ret || param > PARAM_MAX_ARGS)
			ret = -EINVAL;
		else {
			ff->func = fetch_argument;
			ff->data = (void *)param;
		}
	} else
		ret = -EINVAL;
	return ret;
}
/* Recursive argument parser */
static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;
	long offset;
	char *tmp;

	switch (arg[0]) {
	case '$':
		ret = parse_probe_vars(arg + 1, ff, is_return);
		break;
	case '%':	/* named register */
		ret = regs_query_register_offset(arg + 1);
		if (ret >= 0) {
			ff->func = fetch_register;
			ff->data = (void *)(unsigned long)ret;
			ret = 0;
		}
		break;
	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
			ret = strict_strtoul(arg + 1, 0, &param);
			if (ret)
				break;
			ff->func = fetch_memory;
			ff->data = (void *)param;
		} else {
			ret = split_symbol_offset(arg + 1, &offset);
			if (ret)
				break;
			ff->data = alloc_symbol_cache(arg + 1, offset);
			if (ff->data)
				ff->func = fetch_symbol;
			else
				ret = -EINVAL;
		}
		break;
	case '+':	/* indirect memory */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp) {
			ret = -EINVAL;
			break;
		}
		*tmp = '\0';
		ret = strict_strtol(arg + 1, 0, &offset);
		if (ret)
			break;
		if (arg[0] == '-')
			offset = -offset;
		arg = tmp + 1;
		tmp = strrchr(arg, ')');
		if (tmp) {
			struct indirect_fetch_data *id;

			*tmp = '\0';
			id = kzalloc(sizeof(struct indirect_fetch_data),
				     GFP_KERNEL);
			if (!id)
				return -ENOMEM;
			id->offset = offset;
			ret = __parse_probe_arg(arg, &id->orig, is_return);
			if (ret)
				kfree(id);
			else {
				ff->func = fetch_indirect;
				ff->data = (void *)id;
			}
		} else
			ret = -EINVAL;
		break;
	default:
		/* TODO: support custom handler */
		ret = -EINVAL;
	}
	return ret;
}
/* String length checking wrapper */
static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	if (strlen(arg) > MAX_ARGSTR_LEN) {
		pr_info("Argument is too long: %s\n", arg);
		return -ENOSPC;
	}
	return __parse_probe_arg(arg, ff, is_return);
}
/* Return 1 if name is reserved or already used by another argument */
static int conflict_field_name(const char *name,
			       struct probe_arg *args, int narg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
		if (strcmp(reserved_field_names[i], name) == 0)
			return 1;
	for (i = 0; i < narg; i++)
		if (strcmp(args[i].name, name) == 0)
			return 1;
	return 0;
}
static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $argN	: fetch the Nth function argument (N: 0-)
	 *  $retval	: fetch the return value
	 *  $stack	: fetch the stack address
	 *  $stackN	: fetch the Nth entry of the stack (N: 0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR must be a kernel address)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Indirect memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as an alias of FETCHARG.
	 */
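	/*
	 * For example, probes following this syntax can be added by writing
	 * to the kprobe_events debugfs file created below. The probe and
	 * argument names here are only illustrative, and do_sys_open is just
	 * a sample kernel symbol:
	 *
	 *   echo 'p:myprobe do_sys_open dfd=$arg0 flags=$arg2' > kprobe_events
	 *   echo 'r:myretprobe do_sys_open $retval' >> kprobe_events
	 */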
	struct trace_probe *tp;
	int i, ret = 0;
	int is_return = 0;
	char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];
	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	if (argv[0][0] == 'p')
		is_return = 0;
	else if (argv[0][0] == 'r')
		is_return = 1;
	else {
		pr_info("Probe definition must start with 'p' or 'r'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!group)
		group = KPROBE_EVENT_SYSTEM;
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe (%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg)
			*arg++ = '\0';
		else
			arg = argv[i];

		if (conflict_field_name(argv[i], tp->args, i)) {
			pr_info("Argument%d name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument%d name '%s'.\n",
				i, argv[i]);
			ret = -ENOMEM;
			goto error;
		}

		/* Parse fetch argument */
		ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
		if (ret) {
			pr_info("Parse error at argument%d. (%d)\n", i, ret);
			kfree(tp->args[i].name);
			goto error;
		}

		tp->nr_args++;
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}
static void cleanup_all_probes(void)
{
	struct trace_probe *tp;

	mutex_lock(&probe_lock);
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}
	mutex_unlock(&probe_lock);
}
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i, ret;
	char buf[MAX_ARGSTR_LEN + 1];

	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
	else
		seq_printf(m, " %s", probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++) {
		ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
		if (ret < 0) {
			pr_warning("Argument%d decoding error(%d).\n", i, ret);
			return ret;
		}
		seq_printf(m, " %s=%s", tp->args[i].name, buf);
	}
	seq_printf(m, "\n");

	return 0;
}
static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};
static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}
static int command_trace_probe(const char *buf)
{
	char **argv;
	int argc = 0, ret = 0;

	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = create_trace_probe(argc, argv);

	argv_free(argv);
	return ret;
}
#define WRITE_BUFSIZE 128
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char *kbuf, *tmp;
	int ret;
	size_t done;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = done = 0;
	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;
		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		tmp = strchr(kbuf, '\n');
		if (tmp) {
			*tmp = '\0';
			size = tmp - kbuf + 1;
		} else if (done + size < count) {
			pr_warning("Line length is too long: "
				   "should be less than %d.", WRITE_BUFSIZE);
			ret = -EINVAL;
			goto out;
		}
		done += size;
		/* Remove comments */
		tmp = strchr(kbuf, '#');
		if (tmp)
			*tmp = '\0';

		ret = command_trace_probe(kbuf);
		if (ret)
			goto out;
	}
	ret = done;
out:
	kfree(kbuf);
	return ret;
}
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
/* Kprobe handler */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}
/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}
/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
	struct kprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
	struct kretprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kretprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static int probe_event_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_TRACE;
	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}
static void probe_event_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_TRACE;
	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}
static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
	INIT_LIST_HEAD(&event_call->fields);

	return 0;
}
#define DEFINE_FIELD(type, item, name, is_signed)			\
	do {								\
		ret = trace_define_field(event_call, #type, name,	\
					 offsetof(typeof(field), item),	\
					 sizeof(field.item), is_signed,	\
					 FILTER_OTHER);			\
		if (ret)						\
			return ret;					\
	} while (0)
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	ret = trace_define_common_fields(event_call);
	if (ret)
		return ret;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	ret = trace_define_common_fields(event_call);
	if (ret)
		return ret;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}
static int __probe_event_show_format(struct trace_seq *s,
				     struct trace_probe *tp, const char *fmt,
				     const char *arg)
{
	int i;

	/* Show format */
	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
			return 0;

	if (!trace_seq_printf(s, "\", %s", arg))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
			return 0;

	return trace_seq_puts(s, "\n");
}
#define SHOW_FIELD(type, item, name)					\
	do {								\
		ret = trace_seq_printf(s, "\tfield: " #type " %s;\t"	\
				       "offset:%u;\tsize:%u;\n", name,	\
				       (unsigned int)offsetof(typeof(field), item),\
				       (unsigned int)sizeof(type));	\
		if (!ret)						\
			return 0;					\
	} while (0)
static int kprobe_event_show_format(struct ftrace_event_call *call,
				    struct trace_seq *s)
{
	struct kprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx)",
					 "REC->" FIELD_STRING_IP);
}
static int kretprobe_event_show_format(struct ftrace_event_call *call,
				       struct trace_seq *s)
{
	struct kretprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
	SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx <- %lx)",
					 "REC->" FIELD_STRING_FUNC
					 ", REC->" FIELD_STRING_RETIP);
}
#ifdef CONFIG_EVENT_PROFILE

/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}
/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kretprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}
static int probe_profile_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_PROFILE;

	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}
static void probe_profile_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_PROFILE;

	if (!(tp->flags & TP_FLAG_TRACE)) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}
#endif	/* CONFIG_EVENT_PROFILE */
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_profile_func(kp, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_profile_func(ri, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	if (probe_is_return(tp)) {
		tp->event.trace = print_kretprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kretprobe_event_show_format;
		call->define_fields = kretprobe_event_define_fields;
	} else {
		tp->event.trace = print_kprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kprobe_event_show_format;
		call->define_fields = kprobe_event_define_fields;
	}
	call->event = &tp->event;
	call->id = register_ftrace_event(&tp->event);
	if (!call->id)
		return -ENODEV;
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;

#ifdef CONFIG_EVENT_PROFILE
	atomic_set(&call->profile_count, -1);
	call->profile_enable = probe_profile_enable;
	call->profile_disable = probe_profile_disable;
#endif
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		unregister_ftrace_event(&tp->event);
	}
	return ret;
}
static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
}
/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static int kprobe_trace_selftest_target(int a1, int a2, int a3,
					int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret;
	int (*target)(int, int, int, int, int, int);

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
				  "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function entry\n");

	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function return\n");

	ret = target(1, 2, 3, 4, 5, 6);

	cleanup_all_probes();

	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif