 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\

static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_common_fields, name);

	head = trace_get_fields(call);
	return __find_event_field(head, name);

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
		field->filter_type = filter_type;

	field->offset = offset;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	kmem_cache_free(field_cachep, field);

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
	struct list_head *head;

	if (WARN_ON(!call->class))

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   offsetof(typeof(ent), item),		\
				   is_signed_type(type), FILTER_OTHER);	\

static int trace_define_common_fields(void)
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

static void trace_destroy_fields(struct ftrace_event_call *call)
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);

int trace_event_raw_init(struct ftrace_event_call *call)
	id = register_ftrace_event(&call->event);
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
	struct ftrace_event_file *file = data;

	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {
		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			tracing_start_cmdline_record();
			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);

static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
					 int enable, int soft_disable)
	struct ftrace_event_call *call = file->event_call;
	/*
	 * When soft_disable is set and enable is cleared, the sm_ref
	 * reference counter is decremented. If it reaches 0, we want
	 * to clear the SOFT_DISABLED flag but leave the event in the
	 * state that it was. That is, if the event was enabled and
	 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
	 * is set we do not want the event to be enabled before we
	 * clear the bit.
	 *
	 * When soft_disable is not set but the SOFT_MODE flag is,
	 * we do nothing. Do not disable the tracepoint, otherwise
	 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
	 */
		if (atomic_dec_return(&file->sm_ref) > 0)
		disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
		clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

	if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
		clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
		if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		call->class->reg(call, TRACE_REG_UNREGISTER, file);
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
			if (atomic_inc_return(&file->sm_ref) > 1)
			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
			/* Keep the event disabled, when going to SOFT_MODE. */
				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;

static int ftrace_event_enable_disable(struct ftrace_event_file *file,
	return __ftrace_event_enable_disable(file, enable, 0);

static void ftrace_clear_events(struct trace_array *tr)
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	mutex_unlock(&event_mutex);

static void __put_system(struct event_subsystem *system)
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)

	list_del(&system->list);
		kfree(filter->filter_string);

static void __get_system(struct event_subsystem *system)
	WARN_ON_ONCE(system->ref_count == 0);

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
	WARN_ON_ONCE(dir->ref_count == 0);
	__get_system(dir->subsystem);

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)

static void put_system(struct ftrace_subsystem_dir *dir)
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)

		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)

		if (sub && strcmp(sub, call->class->system) != 0)

		if (event && strcmp(event, call->name) != 0)

		ftrace_event_enable_disable(file, set);

	mutex_unlock(&event_mutex);

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
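	 *
	 * For example (illustrative), writing "sched:sched_switch" to
	 * set_event enables just that event, "sched:" enables every event
	 * in the sched subsystem, and "*:sched_switch" enables any event
	 * named sched_switch in any subsystem.
	 */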
	match = strsep(&buf, ":");

		if (!strlen(sub) || strcmp(sub, "*") == 0)
		if (!strlen(event) || strcmp(event, "*") == 0)

	return __ftrace_set_clr_event(tr, match, sub, event, set);

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * trace events.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
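 *
 * For example (illustrative), a caller can enable one event from
 * kernel code with:
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 */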
int trace_set_clr_event(const char *system, const char *event, int set)
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;

	ret = tracing_update_buffers();

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		if (*parser.buffer == '!')
		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);

	trace_parser_put(&parser);

t_next(struct seq_file *m, void *v, loff_t *pos)
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)

static void *t_start(struct seq_file *m, loff_t *pos)
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);

s_next(struct seq_file *m, void *v, loff_t *pos)
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)

static void *s_start(struct seq_file *m, loff_t *pos)
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);

static int t_show(struct seq_file *m, void *v)
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

static void t_stop(struct seq_file *m, void *p)
	mutex_unlock(&event_mutex);

event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_file *file = filp->private_data;

	if (file->flags & FTRACE_EVENT_FL_ENABLED) {
		if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
		else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));

event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct ftrace_event_file *file = filp->private_data;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	ret = tracing_update_buffers();

		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);

	return ret ? ret : cnt;

system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)

		if (system && strcmp(call->class->system, system->name) != 0)

		/*
		 * We need to find out if all the events are set
		 * or if all events or cleared, or if we have
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	ret = tracing_update_buffers();

	if (val != 0 && val != 1)

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);

	FORMAT_FIELD_SEPERATOR	= 2,

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	switch ((unsigned long)v) {
		if (unlikely(list_empty(common_head)))
		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
		field = list_entry(head->prev, struct ftrace_event_field, link);

	case FORMAT_PRINTFMT:

	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

static void *f_start(struct seq_file *m, loff_t *pos)
	/* Start by showing the header */
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
		p = f_next(m, p, &l);
	} while (p && l < *pos);

static int f_show(struct seq_file *m, void *v)
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");

	case FORMAT_FIELD_SEPERATOR:

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",

	/*
	 * Smartly shows the array type(except dynamic array).
	 *
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
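	 *
	 * e.g. a line of a "format" file may look like (illustrative values):
	 *	field:char comm[16];	offset:8;	size:16;	signed:1;
	 */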
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

static void f_stop(struct seq_file *m, void *p)

static const struct seq_operations trace_format_seq_ops = {

static int trace_format_open(struct inode *inode, struct file *file)
	struct ftrace_event_call *call = inode->i_private;

	ret = seq_open(file, &trace_format_seq_ops);

	m = file->private_data;

event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
	struct ftrace_event_call *call = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,

event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;

	if (cnt >= PAGE_SIZE)

	buf = (char *)__get_free_page(GFP_TEMPORARY);

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
	struct trace_array *tr;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
	mutex_unlock(&event_mutex);

	/* Some versions of gcc think dir can be uninitialized here */

	ret = tracing_open_generic(inode, filp);

static int system_tr_open(struct inode *inode, struct file *filp)
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);

	ret = tracing_open_generic(inode, filp);

	filp->private_data = dir;

static int subsystem_release(struct inode *inode, struct file *file)
	struct ftrace_subsystem_dir *dir = file->private_data;

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 */

subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct ftrace_subsystem_dir *dir = filp->private_data;

	if (cnt >= PAGE_SIZE)

	buf = (char *)__get_free_page(GFP_TEMPORARY);

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);

show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {

static const struct seq_operations show_set_event_seq_ops = {

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.llseek = seq_lseek,
	.release = seq_release,

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.llseek = seq_lseek,
	.release = seq_release,

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
	ret = seq_open(file, seq_ops);

	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

ftrace_event_avail_open(struct inode *inode, struct file *file)
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);

ftrace_event_set_open(struct inode *inode, struct file *file)
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	return ftrace_event_open(inode, file, seq_ops);

static struct event_subsystem *
create_new_subsystem(const char *name)
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);

	system->ref_count = 1;
	system->name = name;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)

	list_add(&system->list, &event_subsystems);

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);

		system = create_new_subsystem(name);

		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);

	dir->subsystem = system;

	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",

event_create_dir(struct dentry *parent,
		 struct ftrace_event_file *file,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);

	file->dir = debugfs_create_dir(call->name, d_events);
		pr_warning("Could not create debugfs '%s' directory\n",

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir, call,

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);

	trace_create_file("filter", 0644, file->dir, call,

	trace_create_file("format", 0444, file->dir, call,

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);

static void remove_event_from_tracers(struct ftrace_event_call *call)
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {

		if (file->event_call != call)

		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);

		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 */
	} while_for_each_event_file();

static void event_remove(struct ftrace_event_call *call)
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)

		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 */
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);

static int event_init(struct ftrace_event_call *call)
	if (WARN_ON(!call->name))

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n",

__register_event(struct ftrace_event_call *call, struct module *mod)
	ret = event_init(call);

	list_add(&call->list, &ftrace_events);

static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
		       struct trace_array *tr)
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);

	file->event_call = call;
	atomic_set(&file->sm_ref, 0);
	list_add(&file->list, &tr->events);

/* Add an event to a trace directory */
__trace_add_new_event(struct ftrace_event_call *call,
		      struct trace_array *tr,
		      const struct file_operations *id,
		      const struct file_operations *enable,
		      const struct file_operations *filter,
		      const struct file_operations *format)
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);

	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
__trace_early_add_new_event(struct ftrace_event_call *call,
			    struct trace_array *tr)
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call,
				   struct ftrace_module_file_ops *file_ops);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
		__add_event_to_tracers(call, NULL);

	mutex_unlock(&event_mutex);

/*
 * Must be called under locking both of event_mutex and trace_event_sem.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
	trace_destroy_fields(call);
	destroy_preds(call);

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
	mutex_lock(&event_mutex);
	down_write(&trace_event_sem);
	__trace_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&event_mutex);

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;

static struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
	/*
	 * As event_calls are added in groups by module,
	 * when we find one file_ops, we don't need to search for
	 * each call in that module, as the rest should be the
	 * same. Only search for a new one if the last one did
	 */
	if (file_ops && mod == file_ops->mod)

	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */
	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

static void trace_module_add_events(struct module *mod)
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	file_ops = trace_create_file_ops(mod);

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call, file_ops);

static void trace_module_remove_events(struct module *mod)
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
			__trace_remove_event_call(call);

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
		tracing_reset_all_online_cpus();

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
	struct module *mod = data;

	mutex_lock(&event_mutex);
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
	mutex_unlock(&event_mutex);

__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
	return __trace_add_new_event(call, tr,
				     &file_ops->id, &file_ops->enable,
				     &file_ops->filter, &file_ops->format);

static inline struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)

static inline int trace_module_notify(struct notifier_block *self,
				      unsigned long val, void *data)

__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)

#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
__trace_add_event_dirs(struct trace_array *tr)
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {
			/*
			 * Directories for events by modules need to
			 * keep module ref counts when opened (as we don't
			 * want the module to disappear when reading one
			 * of these files). The file_ops keep account of
			 * the module ref count.
			 */
			file_ops = find_ftrace_file_ops(file_ops, call->mod);
				continue; /* Warn? */
			ret = __trace_add_new_mod_event(call, tr, file_ops);
				pr_warning("Could not create directory for event %s\n",
			ret = __trace_add_new_event(call, tr,
						    &ftrace_event_id_fops,
						    &ftrace_enable_fops,
						    &ftrace_event_filter_fops,
						    &ftrace_event_format_fops);
				pr_warning("Could not create directory for event %s\n",

#ifdef CONFIG_DYNAMIC_FTRACE

#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
{
1853 struct ftrace_event_file
*file
;
1854 unsigned long count
;
1859 static struct ftrace_event_file
*
1860 find_event_file(struct trace_array
*tr
, const char *system
, const char *event
)
1862 struct ftrace_event_file
*file
;
1863 struct ftrace_event_call
*call
;
1865 list_for_each_entry(file
, &tr
->events
, list
) {
1867 call
= file
->event_call
;
1869 if (!call
->name
|| !call
->class || !call
->class->reg
)
1872 if (call
->flags
& TRACE_EVENT_FL_IGNORE_ENABLE
)
1875 if (strcmp(event
, call
->name
) == 0 &&
1876 strcmp(system
, call
->class->system
) == 0)
1883 event_enable_probe(unsigned long ip
, unsigned long parent_ip
, void **_data
)
1885 struct event_probe_data
**pdata
= (struct event_probe_data
**)_data
;
1886 struct event_probe_data
*data
= *pdata
;
1892 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT
, &data
->file
->flags
);
1894 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT
, &data
->file
->flags
);
1898 event_enable_count_probe(unsigned long ip
, unsigned long parent_ip
, void **_data
)
1900 struct event_probe_data
**pdata
= (struct event_probe_data
**)_data
;
1901 struct event_probe_data
*data
= *pdata
;
1909 /* Skip if the event is in a state we want to switch to */
1910 if (data
->enable
== !(data
->file
->flags
& FTRACE_EVENT_FL_SOFT_DISABLED
))
1913 if (data
->count
!= -1)
1916 event_enable_probe(ip
, parent_ip
, _data
);
1920 event_enable_print(struct seq_file
*m
, unsigned long ip
,
1921 struct ftrace_probe_ops
*ops
, void *_data
)
1923 struct event_probe_data
*data
= _data
;
1925 seq_printf(m
, "%ps:", (void *)ip
);
1927 seq_printf(m
, "%s:%s:%s",
1928 data
->enable
? ENABLE_EVENT_STR
: DISABLE_EVENT_STR
,
1929 data
->file
->event_call
->class->system
,
1930 data
->file
->event_call
->name
);
1932 if (data
->count
== -1)
1933 seq_printf(m
, ":unlimited\n");
1935 seq_printf(m
, ":count=%ld\n", data
->count
);
1941 event_enable_init(struct ftrace_probe_ops
*ops
, unsigned long ip
,
1944 struct event_probe_data
**pdata
= (struct event_probe_data
**)_data
;
1945 struct event_probe_data
*data
= *pdata
;
1952 event_enable_free(struct ftrace_probe_ops
*ops
, unsigned long ip
,
1955 struct event_probe_data
**pdata
= (struct event_probe_data
**)_data
;
1956 struct event_probe_data
*data
= *pdata
;
1958 if (WARN_ON_ONCE(data
->ref
<= 0))
1963 /* Remove the SOFT_MODE flag */
1964 __ftrace_event_enable_disable(data
->file
, 0, 1);
1965 module_put(data
->file
->event_call
->mod
);
1971 static struct ftrace_probe_ops event_enable_probe_ops
= {
1972 .func
= event_enable_probe
,
1973 .print
= event_enable_print
,
1974 .init
= event_enable_init
,
1975 .free
= event_enable_free
,
1978 static struct ftrace_probe_ops event_enable_count_probe_ops
= {
1979 .func
= event_enable_count_probe
,
1980 .print
= event_enable_print
,
1981 .init
= event_enable_init
,
1982 .free
= event_enable_free
,
1985 static struct ftrace_probe_ops event_disable_probe_ops
= {
1986 .func
= event_enable_probe
,
1987 .print
= event_enable_print
,
1988 .init
= event_enable_init
,
1989 .free
= event_enable_free
,
1992 static struct ftrace_probe_ops event_disable_count_probe_ops
= {
1993 .func
= event_enable_count_probe
,
1994 .print
= event_enable_print
,
1995 .init
= event_enable_init
,
1996 .free
= event_enable_free
,
2000 event_enable_func(struct ftrace_hash
*hash
,
2001 char *glob
, char *cmd
, char *param
, int enabled
)
2003 struct trace_array
*tr
= top_trace_array();
2004 struct ftrace_event_file
*file
;
2005 struct ftrace_probe_ops
*ops
;
2006 struct event_probe_data
*data
;
2013 /* hash funcs only work with set_ftrace_filter */
2020 system
= strsep(¶m
, ":");
2024 event
= strsep(¶m
, ":");
2026 mutex_lock(&event_mutex
);
2029 file
= find_event_file(tr
, system
, event
);
2033 enable
= strcmp(cmd
, ENABLE_EVENT_STR
) == 0;
2036 ops
= param
? &event_enable_count_probe_ops
: &event_enable_probe_ops
;
2038 ops
= param
? &event_disable_count_probe_ops
: &event_disable_probe_ops
;
2040 if (glob
[0] == '!') {
2041 unregister_ftrace_function_probe_func(glob
+1, ops
);
2047 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
2051 data
->enable
= enable
;
2058 number
= strsep(¶m
, ":");
2061 if (!strlen(number
))
2065 * We use the callback data field (which is a pointer)
2068 ret
= kstrtoul(number
, 0, &data
->count
);
2073 /* Don't let event modules unload while probe registered */
2074 ret
= try_module_get(file
->event_call
->mod
);
2080 ret
= __ftrace_event_enable_disable(file
, 1, 1);
2083 ret
= register_ftrace_function_probe(glob
, ops
, data
);
2085 * The above returns on success the # of functions enabled,
2086 * but if it didn't find any functions it returns zero.
2087 * Consider no functions a failure too.
2094 /* Just return zero, not the number of enabled functions */
2097 mutex_unlock(&event_mutex
);
2101 __ftrace_event_enable_disable(file
, 0, 1);
2103 module_put(file
->event_call
->mod
);
2109 static struct ftrace_func_command event_enable_cmd
= {
2110 .name
= ENABLE_EVENT_STR
,
2111 .func
= event_enable_func
,
2114 static struct ftrace_func_command event_disable_cmd
= {
2115 .name
= DISABLE_EVENT_STR
,
2116 .func
= event_enable_func
,
2119 static __init
int register_event_cmds(void)
2123 ret
= register_ftrace_command(&event_enable_cmd
);
2124 if (WARN_ON(ret
< 0))
2126 ret
= register_ftrace_command(&event_disable_cmd
);
2127 if (WARN_ON(ret
< 0))
2128 unregister_ftrace_command(&event_enable_cmd
);
2132 static inline int register_event_cmds(void) { return 0; }
2133 #endif /* CONFIG_DYNAMIC_FTRACE */
2136 * The top level array has already had its ftrace_event_file
2137 * descriptors created in order to allow for early events to
2138 * be recorded. This function is called after the debugfs has been
2139 * initialized, and we now have to create the files associated
2143 __trace_early_add_event_dirs(struct trace_array
*tr
)
2145 struct ftrace_event_file
*file
;
2149 list_for_each_entry(file
, &tr
->events
, list
) {
2150 ret
= event_create_dir(tr
->event_dir
, file
,
2151 &ftrace_event_id_fops
,
2152 &ftrace_enable_fops
,
2153 &ftrace_event_filter_fops
,
2154 &ftrace_event_format_fops
);
2156 pr_warning("Could not create directory for event %s\n",
2157 file
->event_call
->name
);
2162 * For early boot up, the top trace array requires to have
2163 * a list of events that can be enabled. This must be done before
2164 * the filesystem is set up in order to allow events to be traced
2168 __trace_early_add_events(struct trace_array
*tr
)
2170 struct ftrace_event_call
*call
;
2173 list_for_each_entry(call
, &ftrace_events
, list
) {
2174 /* Early boot up should not have any modules loaded */
2175 if (WARN_ON_ONCE(call
->mod
))
2178 ret
= __trace_early_add_new_event(call
, tr
);
2180 pr_warning("Could not create early event %s\n",
2185 /* Remove the event directory structure for a trace directory. */
2187 __trace_remove_event_dirs(struct trace_array
*tr
)
2189 struct ftrace_event_file
*file
, *next
;
2191 list_for_each_entry_safe(file
, next
, &tr
->events
, list
) {
2192 list_del(&file
->list
);
2193 debugfs_remove_recursive(file
->dir
);
2194 remove_subsystem(file
->system
);
2195 kmem_cache_free(file_cachep
, file
);
2200 __add_event_to_tracers(struct ftrace_event_call
*call
,
2201 struct ftrace_module_file_ops
*file_ops
)
2203 struct trace_array
*tr
;
2205 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
2207 __trace_add_new_mod_event(call
, tr
, file_ops
);
2209 __trace_add_new_event(call
, tr
,
2210 &ftrace_event_id_fops
,
2211 &ftrace_enable_fops
,
2212 &ftrace_event_filter_fops
,
2213 &ftrace_event_format_fops
);
2217 static struct notifier_block trace_module_nb
= {
2218 .notifier_call
= trace_module_notify
,
2222 extern struct ftrace_event_call
*__start_ftrace_events
[];
2223 extern struct ftrace_event_call
*__stop_ftrace_events
[];
2225 static char bootup_event_buf
[COMMAND_LINE_SIZE
] __initdata
;
2227 static __init
int setup_trace_event(char *str
)
2229 strlcpy(bootup_event_buf
, str
, COMMAND_LINE_SIZE
);
2230 ring_buffer_expanded
= true;
2231 tracing_selftest_disabled
= true;
2235 __setup("trace_event=", setup_trace_event
);
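/*
 * e.g. booting with trace_event=sched:sched_switch,irq:irq_handler_entry
 * (an illustrative choice of events) records those events from early boot;
 * the buffer is split on ',' and applied in event_trace_enable() below.
 */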
/* Expects to have event_mutex held when called */
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
	struct dentry *d_events;
	struct dentry *entry;

	entry = debugfs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
		pr_warning("Could not create debugfs 'set_event' entry\n");

	d_events = debugfs_create_dir("events", parent);
		pr_warning("Could not create debugfs 'events' directory\n");

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;
/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);

	down_write(&trace_event_sem);
	__trace_add_event_dirs(tr);
	up_write(&trace_event_sem);

	mutex_unlock(&event_mutex);

/*
 * The top trace array already had its file descriptors created.
 * Now the files themselves need to be created.
 */
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);

	down_write(&trace_event_sem);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_sem);

	mutex_unlock(&event_mutex);

int event_trace_del_tracer(struct trace_array *tr)
	/* Disable any running events */
	__ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);

	mutex_lock(&event_mutex);

	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	debugfs_remove_recursive(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	mutex_unlock(&event_mutex);

static __init int event_trace_memsetup(void)
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);

static __init int event_trace_enable(void)
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_call **iter, *call;
	char *buf = bootup_event_buf;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

		ret = event_init(call);
			list_add(&call->list, &ftrace_events);

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);

		token = strsep(&buf, ",");

		ret = ftrace_set_clr_event(tr, token, 1);
			pr_warn("Failed to enable trace event: %s\n", token);

	trace_printk_start_comm();

	register_event_cmds();

static __init int event_trace_init(void)
	struct trace_array *tr;
	struct dentry *d_tracer;
	struct dentry *entry;

	tr = top_trace_array();

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    tr, &ftrace_avail_fops);
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	ret = early_event_add_tracer(d_tracer, tr);

	ret = register_module_notifier(&trace_module_nb);
		pr_warning("Failed to register trace events module notifier\n");

early_initcall(event_trace_memsetup);
core_initcall(event_trace_enable);
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);

	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	mutex_unlock(&test_mutex);

static __init int event_test_thread(void *unused)
	test_malloc = kmalloc(1234, GFP_KERNEL);
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	kthread_stop(test_thread);

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
	struct ftrace_subsystem_dir *dir;
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;

	tr = top_trace_array();

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)

		/*
		 * Testing syscall events here is pretty useless, but
		 * we still do it if configured. But this is time consuming.
		 * What we really need is a user thread to perform the
		 * syscalls as we test.
		 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");

		ftrace_event_enable_disable(file, 1);
		ftrace_event_enable_disable(file, 0);

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error disabling system %s\n",

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),

	entry = ring_buffer_event_data(event);
	entry->parent_ip = parent_ip;

	trace_buffer_unlock_commit(buffer, event, flags, pc);

	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();

static struct ftrace_ops trace_ops __initdata =
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,

static __init void event_trace_self_test_with_function(void)
	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);

static __init void event_trace_self_test_with_function(void)

static __init int event_trace_self_tests_init(void)
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();

late_initcall(event_trace_self_tests_init);