/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}
static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, padding);

	return ret;
}
void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}
int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);
void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}
static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system->name);
	kfree(system);
}
static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}
static void put_system(struct event_subsystem *system)
{
	mutex_lock(&event_mutex);
	__put_system(system);
	mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * trace events.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
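/*
 * Illustrative in-kernel usage (not part of this file): another subsystem
 * could enable and later disable the sched_switch tracepoint with
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *	...
 *	trace_set_clr_event("sched", "sched_switch", 0);
 */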
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
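/*
 * Example of what this write handler parses (illustrative, assuming the
 * usual debugfs mount point):
 *
 *	# echo sched:sched_switch >> /sys/kernel/debug/tracing/set_event
 *	# echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' clears the matching events instead of setting them.
 */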
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
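/*
 * Illustrative user-space view of the per-event "enable" file handled
 * above (paths assume the usual debugfs mount point):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	# cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	1
 */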
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct event_subsystem *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
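/*
 * Note on the set_to_char mapping above (explanatory comment, not in the
 * original file): bit 0 of 'set' is raised when a disabled event is seen
 * and bit 1 when an enabled one is seen, so reading a subsystem's "enable"
 * file yields '0' (all off), '1' (all on), 'X' (mixed) or '?' (no events
 * matched).
 */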
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}
static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}
static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
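/*
 * Example output of f_show() (illustrative only; event IDs, fields and
 * offsets vary by event, kernel version and architecture):
 *
 *	name: sched_wakeup
 *	ID: 53
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *		field:char comm[TASK_COMM_LEN];	offset:8;	size:16;	signed:1;
 *
 *	print fmt: "comm=%s pid=%d prio=%d ...", REC->comm, REC->pid, ...
 */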
static void f_stop(struct seq_file *m, void *p)
{
}
static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};
static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
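/*
 * Example filter string accepted through this write handler (illustrative,
 * using real sched_switch fields):
 *
 *	# echo 'prev_comm == "bash" && next_pid > 100' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/filter
 */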
static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	int ret;

	if (!inode->i_private)
		goto skip_search;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(system, &event_subsystems, list) {
		if (system == inode->i_private) {
			/* Don't open systems with no events */
			if (!system->nr_events) {
				system = NULL;
				break;
			}
			__get_system(system);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	if (system != inode->i_private)
		return -ENODEV;

 skip_search:
	ret = tracing_open_generic(inode, filp);
	if (ret < 0 && system)
		put_system(system);

	return ret;
}
static int subsystem_release(struct inode *inode, struct file *file)
{
	struct event_subsystem *system = inode->i_private;

	if (system)
		put_system(system);

	return 0;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};
static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};
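/*
 * Rough sketch (explanatory, not in the original file) of the debugfs files
 * these file_operations back, relative to <debugfs>/tracing:
 *
 *	available_events			ftrace_avail_fops
 *	set_event				ftrace_set_event_fops
 *	events/enable				ftrace_system_enable_fops
 *	events/header_page, header_event	ftrace_show_header_fops
 *	events/<system>/enable			ftrace_system_enable_fops
 *	events/<system>/filter			ftrace_subsystem_filter_fops
 *	events/<system>/<event>/enable		ftrace_enable_fops
 *	events/<system>/<event>/id		ftrace_event_id_fops
 *	events/<system>/<event>/filter		ftrace_event_filter_fops
 *	events/<system>/<event>/format		ftrace_event_format_fops
 *
 * (Module events use per-module copies of the per-event fops; see
 * trace_create_file_ops() below.)
 */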
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			__get_system(system);
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->ref_count = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry, system,
			  &ftrace_system_enable_fops);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}
static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace events/%s\n",
					   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);

	return ret;
}
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				__put_system(system);
			}
			break;
		}
	}
}
/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}
/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(*call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
};
extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
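/*
 * Illustrative boot command line use of the parameter registered above:
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * The comma-separated list is consumed by event_trace_init() below, so the
 * named events are enabled early during boot.
 */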
static __init int event_trace_init(void)
{
	struct ftrace_event_call **call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};
static __init void event_trace_self_test_with_function(void)
{
	int ret;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif
static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */