/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"
#define TRACE_SYSTEM "TRACE_SYSTEM"
DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}
static int __trace_define_field(struct list_head *head, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct ftrace_event_field *field;

        field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
                goto err;

        field->name = kstrdup(name, GFP_KERNEL);
        if (!field->name)
                goto err;

        field->type = kstrdup(type, GFP_KERNEL);
        if (!field->type)
                goto err;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);
        else
                field->filter_type = filter_type;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;

        list_add(&field->link, head);

        return 0;

err:
        if (field)
                kfree(field->name);
        kfree(field);

        return -ENOMEM;
}
int trace_define_field(struct ftrace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
        struct list_head *head;

        if (WARN_ON(!call->class))
                return 0;

        head = trace_get_fields(call);
        return __trace_define_field(head, type, name, offset, size,
                                    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
#define __common_field(type, item)					\
        ret = __trace_define_field(&ftrace_common_fields, #type,	\
                                   "common_" #item,			\
                                   offsetof(typeof(ent), item),		\
                                   sizeof(ent.item),			\
                                   is_signed_type(type), FILTER_OTHER);	\
        if (ret)							\
                return ret;
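/*
 * For example, __common_field(int, pid) expands (roughly) to a
 * __trace_define_field() call that registers "common_pid" with the type
 * string "int", the offset of ent.pid and sizeof(ent.pid).
 */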
static int trace_define_common_fields(void)
{
        int ret;
        struct trace_entry ent;

        __common_field(unsigned short, type);
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);
        __common_field(int, padding);

        return ret;
}
void trace_destroy_fields(struct ftrace_event_call *call)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head;

        head = trace_get_fields(call);
        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kfree(field->type);
                kfree(field->name);
                kfree(field);
        }
}
int trace_event_raw_init(struct ftrace_event_call *call)
{
        int id;

        id = register_ftrace_event(&call->event);
        if (!id)
                return -ENODEV;

        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
int ftrace_event_reg(struct ftrace_event_call *call,
                     enum trace_reg type, void *data)
{
        switch (type) {
        case TRACE_REG_REGISTER:
                return tracepoint_probe_register(call->name,
                                                 call->class->probe,
                                                 call);
        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->name,
                                            call->class->probe,
                                            call);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return tracepoint_probe_register(call->name,
                                                 call->class->perf_probe,
                                                 call);
        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->name,
                                            call->class->perf_probe,
                                            call);
                return 0;

        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);
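/*
 * ftrace_event_reg() is the default ->reg() callback: it connects or
 * disconnects the event's probe (or perf probe) to its tracepoint. The
 * perf OPEN/CLOSE/ADD/DEL operations need no work for such events.
 */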
void trace_event_enable_cmd_record(bool enable)
{
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (!(call->flags & TRACE_EVENT_FL_ENABLED))
                        continue;

                if (enable) {
                        tracing_start_cmdline_record();
                        call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
                } else {
                        tracing_stop_cmdline_record();
                        call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
                }
        }
        mutex_unlock(&event_mutex);
}
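/*
 * Enable (1) or disable (0) a single event; callers in this file invoke
 * this with event_mutex held.
 */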
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
                                       int enable)
{
        int ret = 0;

        switch (enable) {
        case 0:
                if (call->flags & TRACE_EVENT_FL_ENABLED) {
                        call->flags &= ~TRACE_EVENT_FL_ENABLED;
                        if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
                                tracing_stop_cmdline_record();
                                call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
                        }
                        call->class->reg(call, TRACE_REG_UNREGISTER, NULL);
                }
                break;
        case 1:
                if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
                        if (trace_flags & TRACE_ITER_RECORD_CMD) {
                                tracing_start_cmdline_record();
                                call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
                        }
                        ret = call->class->reg(call, TRACE_REG_REGISTER, NULL);
                        if (ret) {
                                tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event "
                                        "%s\n", call->name);
                                break;
                        }
                        call->flags |= TRACE_EVENT_FL_ENABLED;
                }
                break;
        }

        return ret;
}
static void ftrace_clear_events(void)
{
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                ftrace_event_enable_disable(call, 0);
        }
        mutex_unlock(&event_mutex);
}
static void __put_system(struct event_subsystem *system)
{
        struct event_filter *filter = system->filter;

        WARN_ON_ONCE(system->ref_count == 0);
        if (--system->ref_count)
                return;

        if (filter) {
                kfree(filter->filter_string);
                kfree(filter);
        }
        kfree(system->name);
        kfree(system);
}
static void __get_system(struct event_subsystem *system)
{
        WARN_ON_ONCE(system->ref_count == 0);
        system->ref_count++;
}
static void put_system(struct event_subsystem *system)
{
        mutex_lock(&event_mutex);
        __put_system(system);
        mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
                                  const char *event, int set)
{
        struct ftrace_event_call *call;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {

                if (!call->name || !call->class || !call->class->reg)
                        continue;

                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;

                if (match &&
                    strcmp(match, call->name) != 0 &&
                    strcmp(match, call->class->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->class->system) != 0)
                        continue;

                if (event && strcmp(event, call->name) != 0)
                        continue;

                ftrace_event_enable_disable(call, set);

                ret = 0;
        }
        mutex_unlock(&event_mutex);

        return ret;
}
static int ftrace_set_clr_event(char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;
        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */
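        /*
         * For example (illustrative names): "sched:sched_switch" names one
         * event, "sched:" every event in the sched subsystem, and
         * ":sched_switch" any event called sched_switch.
         */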
        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        return __ftrace_set_clr_event(match, sub, event, set);
}
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * trace events.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
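/*
 * Usage sketch (hypothetical caller):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *
 * would enable the sched_switch event from within the kernel.
 */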
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        ssize_t read, ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        if (read >= 0 && trace_parser_loaded((&parser))) {
                int set = 1;

                if (*parser.buffer == '!')
                        set = 0;

                parser.buffer[parser.idx] = 0;
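                /* a leading '!' means "disable"; skip it when matching below */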
                ret = ftrace_set_clr_event(parser.buffer + !set, set);
                if (ret)
                        goto out_put;
        }

        ret = read;

 out_put:
        trace_parser_put(&parser);

        return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = v;

        (*pos)++;

        list_for_each_entry_continue(call, &ftrace_events, list) {
                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->class && call->class->reg)
                        return call;
        }

        return NULL;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_call *call;
        loff_t l;

        mutex_lock(&event_mutex);

        call = list_entry(&ftrace_events, struct ftrace_event_call, list);
        for (l = 0; l <= *pos; ) {
                call = t_next(m, call, &l);
                if (!call)
                        break;
        }

        return call;
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = v;

        (*pos)++;

        list_for_each_entry_continue(call, &ftrace_events, list) {
                if (call->flags & TRACE_EVENT_FL_ENABLED)
                        return call;
        }

        return NULL;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_call *call;
        loff_t l;

        mutex_lock(&event_mutex);

        call = list_entry(&ftrace_events, struct ftrace_event_call, list);
        for (l = 0; l <= *pos; ) {
                call = s_next(m, call, &l);
                if (!call)
                        break;
        }

        return call;
}
static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = v;

        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
        seq_printf(m, "%s\n", call->name);

        return 0;
}
static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops;

        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_clear_events();

        seq_ops = inode->i_private;
        return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;

        if (call->flags & TRACE_EVENT_FL_ENABLED)
                buf = "1\n";
        else
                buf = "0\n";

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                mutex_lock(&event_mutex);
                ret = ftrace_event_enable_disable(call, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return ret ? ret : cnt;
}
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        struct event_subsystem *system = filp->private_data;
        struct ftrace_event_call *call;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (!call->name || !call->class || !call->class->reg)
                        continue;

                if (system && strcmp(call->class->system, system->name) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set
                 * or if all events are cleared, or if we have
                 * a mixture.
                 */
                set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
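                /*
                 * Bit 0 is set once a disabled event is seen and bit 1 once
                 * an enabled one is, so 1 maps to '0', 2 to '1' and 3
                 * (a mixture) to 'X' via set_to_char[] above.
                 */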
                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        const char *name = NULL;
        unsigned long val;
        ssize_t ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        /*
         * Opening of "enable" adds a ref count to system,
         * so the name is safe to use.
         */
        if (system)
                name = system->name;

        ret = __ftrace_set_clr_event(NULL, name, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}
enum {
        FORMAT_HEADER		= 1,
        FORMAT_FIELD_SEPERATOR	= 2,
        FORMAT_PRINTFMT		= 3,
};
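/*
 * The "format" file is walked in this order: FORMAT_HEADER, the common
 * fields, FORMAT_FIELD_SEPERATOR, the event's own fields, and finally
 * FORMAT_PRINTFMT (see f_next() and f_show() below).
 */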
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);

        (*pos)++;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                if (unlikely(list_empty(common_head)))
                        return NULL;

                field = list_entry(common_head->prev,
                                   struct ftrace_event_field, link);
                return field;

        case FORMAT_FIELD_SEPERATOR:
                if (unlikely(list_empty(head)))
                        return NULL;

                field = list_entry(head->prev, struct ftrace_event_field, link);
                return field;

        case FORMAT_PRINTFMT:
                /* all done */
                return NULL;
        }

        field = v;
        if (field->link.prev == common_head)
                return (void *)FORMAT_FIELD_SEPERATOR;
        else if (field->link.prev == head)
                return (void *)FORMAT_PRINTFMT;

        field = list_entry(field->link.prev, struct ftrace_event_field, link);

        return field;
}
static void *f_start(struct seq_file *m, loff_t *pos)
{
        void *p = (void *)FORMAT_HEADER;
        loff_t l = 0;

        /* Start by showing the header */
        if (!*pos)
                return (void *)FORMAT_HEADER;

        p = (void *)FORMAT_HEADER;
        do {
                p = f_next(m, p, &l);
        } while (p && l < *pos);

        return p;
}
static int f_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
        const char *array_descriptor;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                seq_printf(m, "name: %s\n", call->name);
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_printf(m, "format:\n");
                return 0;

        case FORMAT_FIELD_SEPERATOR:
                seq_putc(m, '\n');
                return 0;

        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }

        field = v;

        /*
         * Smartly shows the array type (except dynamic array).
         * Normal:
         *	field:TYPE VAR
         * If TYPE := TYPE[LEN], it is shown:
         *	field:TYPE VAR[LEN]
         */
        array_descriptor = strchr(field->type, '[');

        if (!strncmp(field->type, "__data_loc", 10))
                array_descriptor = NULL;

        if (!array_descriptor)
                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           field->type, field->name, field->offset,
                           field->size, !!field->is_signed);
        else
                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           (int)(array_descriptor - field->type),
                           field->type, field->name,
                           array_descriptor, field->offset,
                           field->size, !!field->is_signed);

        return 0;
}
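/*
 * A "format" file therefore reads roughly like (illustrative values):
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *		field:char prev_comm[16];	offset:8;	size:16;	signed:1;
 *
 *	print fmt: ...
 */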
static void f_stop(struct seq_file *m, void *p)
{
}
static const struct seq_operations trace_format_seq_ops = {
        .start		= f_start,
        .next		= f_next,
        .stop		= f_stop,
        .show		= f_show,
};
static int trace_format_open(struct inode *inode, struct file *file)
{
        struct ftrace_event_call *call = inode->i_private;
        struct seq_file *m;
        int ret;

        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;

        m = file->private_data;
        m->private = call;

        return 0;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);
        trace_seq_printf(s, "%d\n", call->event.type);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
        kfree(s);
        return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_event_filter(call, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_event_filter(call, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}
static LIST_HEAD(event_subsystems);
static int subsystem_open(struct inode *inode, struct file *filp)
{
        struct event_subsystem *system = NULL;
        int ret;

        if (!inode->i_private)
                goto skip_search;

        /* Make sure the system still exists */
        mutex_lock(&event_mutex);
        list_for_each_entry(system, &event_subsystems, list) {
                if (system == inode->i_private) {
                        /* Don't open systems with no events */
                        if (!system->nr_events) {
                                system = NULL;
                                break;
                        }
                        __get_system(system);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        if (system != inode->i_private)
                return -ENODEV;

 skip_search:
        ret = tracing_open_generic(inode, filp);
        if (ret < 0 && system)
                put_system(system);

        return ret;
}
static int subsystem_release(struct inode *inode, struct file *file)
{
        struct event_subsystem *system = inode->i_private;

        if (system)
                put_system(system);

        return 0;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_subsystem_event_filter(system, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}
static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};
static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = trace_format_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
        .open = tracing_open_generic,
        .read = event_id_read,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = subsystem_open,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
        .llseek = default_llseek,
        .release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
        .open = subsystem_open,
        .read = system_enable_read,
        .write = system_enable_write,
        .llseek = default_llseek,
        .release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
        .open = tracing_open_generic,
        .read = show_header,
        .llseek = default_llseek,
};
static struct dentry *event_trace_events_dir(void)
{
        static struct dentry *d_tracer;
        static struct dentry *d_events;

        if (d_events)
                return d_events;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return NULL;

        d_events = debugfs_create_dir("events", d_tracer);
        if (!d_events)
                pr_warning("Could not create debugfs "
                           "'events' directory\n");

        return d_events;
}
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
        struct event_subsystem *system;
        struct dentry *entry;

        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
                        system->nr_events++;
                        return system->entry;
                }
        }

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system) {
                pr_warning("No memory to create event subsystem %s\n",
                           name);
                return d_events;
        }

        system->entry = debugfs_create_dir(name, d_events);
        if (!system->entry) {
                pr_warning("Could not create event subsystem %s\n",
                           name);
                kfree(system);
                return d_events;
        }

        system->nr_events = 1;
        system->ref_count = 1;
        system->name = kstrdup(name, GFP_KERNEL);
        if (!system->name) {
                debugfs_remove(system->entry);
                kfree(system);
                return d_events;
        }

        list_add(&system->list, &event_subsystems);

        system->filter = NULL;

        system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
        if (!system->filter) {
                pr_warning("Could not allocate filter for subsystem "
                           "'%s'\n", name);
                return system->entry;
        }

        entry = debugfs_create_file("filter", 0644, system->entry, system,
                                    &ftrace_subsystem_filter_fops);
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
                pr_warning("Could not create debugfs "
                           "'%s/filter' entry\n", name);
        }

        trace_create_file("enable", 0644, system->entry, system,
                          &ftrace_system_enable_fops);

        return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                 const struct file_operations *id,
                 const struct file_operations *enable,
                 const struct file_operations *filter,
                 const struct file_operations *format)
{
        struct list_head *head;
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                d_events = event_subsystem_dir(call->class->system, d_events);

        call->dir = debugfs_create_dir(call->name, d_events);
        if (!call->dir) {
                pr_warning("Could not create debugfs "
                           "'%s' directory\n", call->name);
                return -1;
        }

        if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                trace_create_file("enable", 0644, call->dir, call,
                                  enable);

#ifdef CONFIG_PERF_EVENTS
        if (call->event.type && call->class->reg)
                trace_create_file("id", 0444, call->dir, call,
                                  id);
#endif

        /*
         * Other events may have the same class. Only update
         * the fields if they are not already defined.
         */
        head = trace_get_fields(call);
        if (list_empty(head)) {
                ret = call->class->define_fields(call);
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
        }
        trace_create_file("filter", 0644, call->dir, call,
                          filter);

        trace_create_file("format", 0444, call->dir, call,
                          format);

        return 0;
}
static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
                       const struct file_operations *id,
                       const struct file_operations *enable,
                       const struct file_operations *filter,
                       const struct file_operations *format)
{
        struct dentry *d_events;
        int ret;

        /* The linker may leave blanks */
        if (!call->name)
                return -EINVAL;

        if (call->class->raw_init) {
                ret = call->class->raw_init(call);
                if (ret < 0) {
                        if (ret != -ENOSYS)
                                pr_warning("Could not initialize trace events/%s\n",
                                           call->name);
                        return ret;
                }
        }

        d_events = event_trace_events_dir();
        if (!d_events)
                return -ENOENT;

        ret = event_create_dir(call, d_events, id, enable, filter, format);
        if (!ret)
                list_add(&call->list, &ftrace_events);
        call->mod = mod;

        return ret;
}
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
        int ret;

        mutex_lock(&event_mutex);
        ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
                                     &ftrace_enable_fops,
                                     &ftrace_event_filter_fops,
                                     &ftrace_event_format_fops);
        mutex_unlock(&event_mutex);

        return ret;
}
static void remove_subsystem_dir(const char *name)
{
        struct event_subsystem *system;

        if (strcmp(name, TRACE_SYSTEM) == 0)
                return;

        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
                        if (!--system->nr_events) {
                                debugfs_remove_recursive(system->entry);
                                list_del(&system->list);
                                __put_system(system);
                        }
                        break;
                }
        }
}
/*
 * Must be called while holding both event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
        ftrace_event_enable_disable(call, 0);
        if (call->event.funcs)
                __unregister_ftrace_event(&call->event);
        debugfs_remove_recursive(call->dir);
        list_del(&call->list);
        trace_destroy_fields(call);
        destroy_preds(call);
        remove_subsystem_dir(call->class->system);
}
/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
        mutex_lock(&event_mutex);
        down_write(&trace_event_mutex);
        __trace_remove_event_call(call);
        up_write(&trace_event_mutex);
        mutex_unlock(&event_mutex);
}
#define for_each_event(event, start, end)			\
        for (event = start;					\
             (unsigned long)event < (unsigned long)end;		\
             event++)
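/*
 * Built-in events are emitted by the linker as an array of pointers
 * between __start_ftrace_events and __stop_ftrace_events (declared
 * below); for_each_event() walks such an array.
 */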
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
        struct list_head		list;
        struct module			*mod;
        struct file_operations		id;
        struct file_operations		enable;
        struct file_operations		format;
        struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;

        /*
         * This is a bit of a PITA. To allow for correct reference
         * counting, modules must "own" their file_operations.
         * To do this, we allocate the file operations that will be
         * used in the event directory.
         */

        file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
        if (!file_ops)
                return NULL;

        file_ops->mod = mod;

        file_ops->id = ftrace_event_id_fops;
        file_ops->id.owner = mod;

        file_ops->enable = ftrace_enable_fops;
        file_ops->enable.owner = mod;

        file_ops->filter = ftrace_event_filter_fops;
        file_ops->filter.owner = mod;

        file_ops->format = ftrace_event_format_fops;
        file_ops->format.owner = mod;

        list_add(&file_ops->list, &ftrace_module_file_list);

        return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call **call, **start, **end;

        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;

        if (start == end)
                return;

        file_ops = trace_create_file_ops(mod);
        if (!file_ops)
                return;

        for_each_event(call, start, end) {
                __trace_add_event_call(*call, mod,
                                       &file_ops->id, &file_ops->enable,
                                       &file_ops->filter, &file_ops->format);
        }
}
static void trace_module_remove_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;
        struct ftrace_event_call *call, *p;
        bool found = false;

        down_write(&trace_event_mutex);
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                if (call->mod == mod) {
                        found = true;
                        __trace_remove_event_call(call);
                }
        }

        /* Now free the file_operations */
        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
                if (file_ops->mod == mod)
                        break;
        }
        if (&file_ops->list != &ftrace_module_file_list) {
                list_del(&file_ops->list);
                kfree(file_ops);
        }

        /*
         * It is safest to reset the ring buffer if the module being unloaded
         * registered any events.
         */
        if (found)
                tracing_reset_current_online_cpus();
        up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_events(mod);
                break;
        }
        mutex_unlock(&event_mutex);

        return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        return 0;
}
#endif /* CONFIG_MODULES */
static struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
};
extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
        strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
        ring_buffer_expanded = 1;
        tracing_selftest_disabled = 1;

        return 1;
}
__setup("trace_event=", setup_trace_event);
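/*
 * For example, booting with
 *
 *	trace_event=sched:sched_switch,irq:*
 *
 * (illustrative event names) enables those events early in boot; passing
 * any event list also disables the startup self tests, as seen above.
 */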
static __init int event_trace_init(void)
{
        struct ftrace_event_call **call;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;
        int ret;
        char *buf = bootup_event_buf;
        char *token;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    (void *)&show_event_seq_ops,
                                    &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_events' entry\n");

        entry = debugfs_create_file("set_event", 0644, d_tracer,
                                    (void *)&show_set_event_seq_ops,
                                    &ftrace_set_event_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_event' entry\n");

        d_events = event_trace_events_dir();
        if (!d_events)
                return 0;

        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
                          &ftrace_show_header_fops);

        trace_create_file("header_event", 0444, d_events,
                          ring_buffer_print_entry_header,
                          &ftrace_show_header_fops);

        trace_create_file("enable", 0644, d_events,
                          NULL, &ftrace_system_enable_fops);

        if (trace_define_common_fields())
                pr_warning("tracing: Failed to allocate common fields");

        for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
                __trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
                                       &ftrace_enable_fops,
                                       &ftrace_event_filter_fops,
                                       &ftrace_event_format_fops);
        }

        while (true) {
                token = strsep(&buf, ",");

                if (!token)
                        break;
                if (!*token)
                        continue;

                ret = ftrace_set_clr_event(token, 1);
                if (ret)
                        pr_warning("Failed to enable trace event: %s\n", token);
        }

        ret = register_module_notifier(&trace_module_nb);
        if (ret)
                pr_warning("Failed to register trace events module notifier\n");

        return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
static __init void test_work(struct work_struct *dummy)
{
        spin_lock(&test_spinlock);
        spin_lock_irq(&test_spinlock_irq);
        udelay(1);
        spin_unlock_irq(&test_spinlock_irq);
        spin_unlock(&test_spinlock);

        mutex_lock(&test_mutex);
        msleep(1);
        mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
        void *test_malloc;

        test_malloc = kmalloc(1234, GFP_KERNEL);
        if (!test_malloc)
                pr_info("failed to kmalloc\n");

        schedule_on_each_cpu(test_work);

        kfree(test_malloc);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop())
                schedule();

        return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
        struct task_struct *test_thread;

        test_thread = kthread_run(event_test_thread, NULL, "test-events");
        msleep(1);
        kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
        struct ftrace_event_call *call;
        struct event_subsystem *system;
        int ret;

        pr_info("Running tests on trace events:\n");

        list_for_each_entry(call, &ftrace_events, list) {

                /* Only test those that have a probe */
                if (!call->class || !call->class->probe)
                        continue;

                /*
                 * Testing syscall events here is pretty useless, but
                 * we still do it if configured. But this is time consuming.
                 * What we really need is a user thread to perform the
                 * syscalls as we test.
                 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
                if (call->class->system &&
                    strcmp(call->class->system, "syscalls") == 0)
                        continue;
#endif

                pr_info("Testing event %s: ", call->name);

                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
                if (call->flags & TRACE_EVENT_FL_ENABLED) {
                        pr_warning("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }

                ftrace_event_enable_disable(call, 1);
                event_test_stuff();
                ftrace_event_enable_disable(call, 0);

                pr_cont("OK\n");
        }

        /* Now test at the sub system level */

        pr_info("Running tests on trace event systems:\n");

        list_for_each_entry(system, &event_subsystems, list) {

                /* the ftrace system is special, skip it */
                if (strcmp(system->name, "ftrace") == 0)
                        continue;

                pr_info("Testing event system %s: ", system->name);

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error enabling system %s\n",
                                   system->name);
                        continue;
                }

                event_test_stuff();

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret))
                        pr_warning("error disabling system %s\n",
                                   system->name);

                pr_cont("OK\n");
        }

        /* Test with all events enabled */

        pr_info("Running tests on all trace events:\n");
        pr_info("Testing all events: ");

        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error enabling all events\n");
                return;
        }

        event_test_stuff();

        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error disabling all events\n");
                return;
        }

        pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        pc = preempt_count();
        preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

        if (disabled != 1)
                goto out;

        local_save_flags(flags);

        event = trace_current_buffer_lock_reserve(&buffer,
                                                  TRACE_FN, sizeof(*entry),
                                                  flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->parent_ip = parent_ip;

        trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
        preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __initdata =
{
        .func = function_test_events_call,
};
static __init void event_trace_self_test_with_function(void)
{
        int ret;

        ret = register_ftrace_function(&trace_ops);
        if (WARN_ON(ret < 0)) {
                pr_info("Failed to enable function tracer for event tests\n");
                return;
        }
        pr_info("Running tests again, along with the function tracer\n");
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif
static __init int event_trace_self_tests_init(void)
{
        if (!tracing_selftest_disabled) {
                event_trace_self_tests();
                event_trace_self_test_with_function();
        }

        return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */