/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"
DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
static int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}
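/*
 * Illustrative expansion (added commentary, not in the original flow):
 * __common_field(int, pid) above expands to roughly
 *
 *	ret = trace_define_field(call, "int", "common_pid",
 *				 offsetof(typeof(ent), pid),
 *				 sizeof(ent.pid),
 *				 is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 *
 * so every event records the same "common_*" fields at the same offsets.
 */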
void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}
int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(call->event);
	if (!id)
		return -ENODEV;
	call->id = id;
	INIT_LIST_HEAD(&call->fields);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call);
		}
		break;
	case 1:
		if (!call->enabled) {
			tracing_start_cmdline_record();
			ret = call->regfunc(call);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->enabled = 1;
		}
		break;
	}

	return ret;
}
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 * (worked examples follow the function)
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
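/*
 * Worked examples (the "sched"/"irq" names are illustrative only):
 *
 *	__ftrace_set_clr_event("irq", NULL, NULL, 1)
 *		- "irq" may match either an event name or a system name;
 *	__ftrace_set_clr_event(NULL, "sched", "sched_switch", 1)
 *		- exactly one event in one subsystem;
 *	__ftrace_set_clr_event(NULL, NULL, NULL, 0)
 *		- clears every registered event.
 */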
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 *  (worked examples follow this function)
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
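/*
 * Examples for the buf format above ("sched"/"sched_switch" are
 * illustrative names, not defined here):
 *
 *	"sched:sched_switch"	-> sub = "sched", event = "sched_switch"
 *	"sched:" or "sched:*"	-> sub = "sched", every event in it
 *	":sched_switch"		-> any subsystem, event "sched_switch"
 *	"sched_switch"		-> match = "sched_switch", taken as either
 *				   an event name or a subsystem name
 */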
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * trace events.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
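/*
 * Usage sketch for a hypothetical in-kernel caller (the event names are
 * assumptions for illustration, not defined in this file):
 *
 *	int err;
 *
 *	err = trace_set_clr_event("sched", "sched_switch", 1);
 *	if (err)
 *		pr_warning("sched_switch event not available\n");
 */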
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			return call;
	}

	return NULL;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture. (see the worked example after this function)
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
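/*
 * Worked example of the "set" encoding above: bit 0 is set once at
 * least one matching event is disabled, bit 1 once at least one is
 * enabled.  set_to_char[] then maps
 *
 *	set == 0 -> '?'  (no matching events at all)
 *	set == 1 -> '0'  (all disabled)
 *	set == 2 -> '1'  (all enabled)
 *	set == 3 -> 'X'  (a mixture)
 */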
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct ftrace_event_field *field;
	struct trace_seq *s;
	int common_field_count = 5;
	char *buf;
	int r = 0;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");

	list_for_each_entry_reverse(field, &call->fields, link) {
		/*
		 * Smartly shows the array type (except dynamic array).
		 * Normal:
		 *	field:TYPE VAR
		 * If TYPE := TYPE[LEN], it is shown:
		 *	field:TYPE VAR[LEN]
		 * (a sample of the output follows this function)
		 */
		const char *array_descriptor = strchr(field->type, '[');

		if (!strncmp(field->type, "__data_loc", 10))
			array_descriptor = NULL;

		if (!array_descriptor) {
			r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
					"\tsize:%u;\tsigned:%d;\n",
					field->type, field->name, field->offset,
					field->size, !!field->is_signed);
		} else {
			r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
					"\tsize:%u;\tsigned:%d;\n",
					(int)(array_descriptor - field->type),
					field->type, field->name,
					array_descriptor, field->offset,
					field->size, !!field->is_signed);
		}

		if (--common_field_count == 0)
			r = trace_seq_printf(s, "\n");

		if (!r)
			break;
	}

	if (r)
		r = trace_seq_printf(s, "\nprint fmt: %s\n",
				call->print_fmt);

	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
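/*
 * Sample of the resulting "format" file for one event (the event name,
 * field names and offsets are illustrative):
 *
 *	name: sched_switch
 *	ID: 123
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		(remaining common_* fields, then a blank line)
 *
 *		field:char prev_comm[16];	offset:12;	size:16;	signed:1;
 *
 *	print fmt: ...
 */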
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

	if (call->id && call->perf_event_enable)
		trace_create_file("id", 0444, call->dir, call,
				  id);

	if (call->define_fields) {
		ret = trace_define_common_fields(call);
		if (!ret)
			ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		trace_create_file("filter", 0644, call->dir, call,
				  filter);
	}

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}
static int __trace_add_event_call(struct ftrace_event_call *call)
{
	struct dentry *d_events;
	int ret;

	if (!call->name)
		return -EINVAL;

	if (call->raw_init) {
		ret = call->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace "
					   "events/%s\n", call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
			       &ftrace_enable_fops, &ftrace_event_filter_fops,
			       &ftrace_event_format_fops);
	if (!ret)
		list_add(&call->list, &ftrace_events);

	return ret;
}
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call);
	mutex_unlock(&event_mutex);

	return ret;
}
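/*
 * Usage sketch (hypothetical): a dynamic-event backend pairs this with
 * trace_remove_event_call() defined below, e.g.
 *
 *	err = trace_add_event_call(&my_call);
 *	if (!err) {
 *		(event is live until ...)
 *		trace_remove_event_call(&my_call);
 *	}
 *
 * where "my_call" stands for a fully initialized ftrace_event_call and
 * is not defined in this file.
 */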
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}
/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event)
		__unregister_ftrace_event(call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->system);
}
/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);
/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
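/*
 * Design note (added commentary): copying each file_operations and
 * setting .owner = mod means the VFS takes a module reference, via
 * try_module_get(fops->owner), on every open of an event file, so the
 * module cannot be unloaded while one of its event files is held open.
 */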
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		ret = event_create_dir(call, d_events,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
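/*
 * Example boot usage (the event names are illustrative): booting with
 *
 *	trace_event=sched:sched_switch,irq
 *
 * stores the list in bootup_event_buf; event_trace_init() below splits
 * it on ',' and hands each token to ftrace_set_clr_event().
 */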
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->system &&
		    strcmp(call->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};
static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif
static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */