/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"
DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}
static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}
void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}
int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);
void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
				       int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		/* a leading '!' means "disable this event" */
		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}
static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}
static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
static void f_stop(struct seq_file *m, void *p)
{
}
static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};
static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};
static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};
static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};
static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
};
static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
};
static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}
static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace events/%s\n",
					   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);
	return ret;
}
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}
/*
 * Must be called while holding both event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}
/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
};
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		__trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};
static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif
static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */