/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"
#define TRACE_SYSTEM "TRACE_SYSTEM"
DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_define_common_fields);
void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call);
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc(call);
		}
		break;
	}
}
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			return call;
	}

	return NULL;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
extern char *__bad_type_size(void);

#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
	sizeof(field.name), is_signed_type(type)
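
/*
 * Note on the __bad_type_size() trick above: the function is declared
 * but never defined.  If the sizeof check ever evaluates true, the
 * call survives to link time and the build fails; with matching sizes
 * the compiler discards the dead branch entirely, so a correct build
 * costs nothing at runtime.
 */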
static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\n",
			FIELD(unsigned short, type),
			FIELD(unsigned char, flags),
			FIELD(unsigned char, preempt_count),
			FIELD(int, pid),
			FIELD(int, lock_depth));
}
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

	if (call->id && call->profile_enable)
		trace_create_file("id", 0444, call->dir, call,
				  id);

	if (call->define_fields) {
		ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		trace_create_file("filter", 0644, call->dir, call,
				  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}
static int __trace_add_event_call(struct ftrace_event_call *call)
{
	struct dentry *d_events;
	int ret;

	if (!call->name)
		return -EINVAL;

	if (call->raw_init) {
		ret = call->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace "
					   "events/%s\n", call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
			       &ftrace_enable_fops, &ftrace_event_filter_fops,
			       &ftrace_event_format_fops);
	if (!ret)
		list_add(&call->list, &ftrace_events);

	return ret;
}
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call);
	mutex_unlock(&event_mutex);
	return ret;
}
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}
/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event)
		__unregister_ftrace_event(call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->system);
}
/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
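
/*
 * The pointer-range comparison above lets the macro walk an array of
 * ftrace_event_call entries laid out by the linker, either a module's
 * trace_events array or the built-in section bounded by
 * __start_ftrace_events/__stop_ftrace_events (see event_trace_init()
 * below).
 */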
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		ret = event_create_dir(call, d_events,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->system &&
		    strcmp(call->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}
static struct ftrace_ops trace_ops __initdata  =
{
	.func = function_test_events_call,
};
static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif
static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */