/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
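
/*
 * Describe one field of an event and add it to the event's field list.
 * The filter code uses these descriptions when parsing filter expressions.
 */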
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}
void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}
int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(call->event);
	if (!id)
		return -ENODEV;
	call->id = id;
	INIT_LIST_HEAD(&call->fields);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
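
/*
 * Enable or disable a single event: register or unregister its tracepoint
 * probe, and start/stop cmdline recording so PIDs can later be resolved to
 * task comm names in the trace output.
 */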
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call);
		}
		break;
	case 1:
		if (!call->enabled) {
			tracing_start_cmdline_record();
			ret = call->regfunc(call);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->enabled = 1;
		}
		break;
	}

	return ret;
}
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
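
/*
 * Example (illustrative only, assuming such an event is registered):
 * trace_set_clr_event("sched", "sched_switch", 1) enables the
 * sched:sched_switch event, and calling it again with set == 0 disables
 * it. Passing a NULL event name acts on every event in the subsystem.
 */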
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127
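
/*
 * Handler for writes to the set_event file: each name written is parsed
 * and handed to ftrace_set_clr_event(); a leading '!' clears the event
 * instead of setting it.
 */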
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
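
/*
 * seq_file iterators: t_start/t_next walk every registered event for the
 * "available_events" file, while s_start/s_next below walk only the events
 * that are currently enabled for the "set_event" file.
 */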
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
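
/*
 * The per-event "enable" file: reading returns "0" or "1", writing a 0 or 1
 * disables or enables the event.
 */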
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
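
/*
 * The per-subsystem "enable" file: reading shows '1' if every event in the
 * subsystem is enabled, '0' if every event is disabled, and 'X' for a
 * mixture; writing 0 or 1 applies to all events in the subsystem.
 */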
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
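
/*
 * __bad_type_size() is never defined; referencing it in the FIELD() macro
 * below turns a mismatch between the declared field type and the struct
 * member size into a link-time error.
 */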
extern char *__bad_type_size(void);

#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name), is_signed_type(type)
static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\n",
			FIELD(unsigned short, type),
			FIELD(unsigned char, flags),
			FIELD(unsigned char, preempt_count),
			FIELD(int, pid),
			FIELD(int, lock_depth));
}
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
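
/*
 * Create (once) and return the debugfs "events" directory under the
 * tracing directory.
 */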
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);
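
/*
 * Find or create the debugfs directory for an event subsystem and give it
 * its own "filter" and "enable" files.
 */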
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   " %s\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}
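
/*
 * Create the per-event debugfs directory along with its "enable", "id",
 * "filter" and "format" files, using the file_operations passed in so that
 * module events can supply their own module-owned copies.
 */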
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

	if (call->id && call->profile_enable)
		trace_create_file("id", 0444, call->dir, call,
				  id);

	if (call->define_fields) {
		ret = trace_define_common_fields(call);
		if (!ret)
			ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		trace_create_file("filter", 0644, call->dir, call,
				  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}
static int __trace_add_event_call(struct ftrace_event_call *call)
{
	struct dentry *d_events;
	int ret;

	if (!call->name)
		return -EINVAL;

	if (call->raw_init) {
		ret = call->raw_init(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace "
				   "events/%s\n", call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
			       &ftrace_enable_fops, &ftrace_event_filter_fops,
			       &ftrace_event_format_fops);
	if (!ret)
		list_add(&call->list, &ftrace_events);

	return ret;
}
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call);
	mutex_unlock(&event_mutex);
	return ret;
}
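
/*
 * Drop a reference on a subsystem directory; when the last event in the
 * subsystem goes away, remove the directory and free the subsystem.
 */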
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}
/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event)
		__unregister_ftrace_event(call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->system);
}
/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
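
/*
 * Called when a module with trace events is loaded: initialize each event
 * the module provides and create its debugfs directory.
 */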
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				pr_warning("Could not initialize trace "
					   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		ret = event_create_dir(call, d_events,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
};
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
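
/*
 * "trace_event=" boot parameter: remember the requested events so they can
 * be enabled once the event directories exist, and disable the startup
 * self tests, which would interfere with events enabled at boot.
 */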
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				pr_warning("Could not initialize trace "
					   "point events/%s\n", call->name);
				continue;
			}
		}
		ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
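
/*
 * Work function used by the self tests: take and release a few locks and
 * sleep briefly so that common events (locking, scheduling) have something
 * to record while each event is under test.
 */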
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->system &&
		    strcmp(call->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
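
/*
 * When the function tracer is available, the self tests are run a second
 * time with a callback registered on every traced function; the callback
 * below records a TRACE_FN entry while guarding against recursion with a
 * per-cpu counter.
 */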
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};
static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif
static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */