/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;
#define SYSTEM_FL_FREE_NAME		(1 << 31)

static inline int system_refcount(struct event_subsystem *system)
{
    return system->ref_count & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_inc(struct event_subsystem *system)
{
    return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_dec(struct event_subsystem *system)
{
    return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
}
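/*
 * Illustrative note (not part of the original comments): the top bit of
 * ref_count doubles as the "name was kmalloc'd" flag.  A dynamically named
 * subsystem holding one reference therefore stores 0x80000001, while
 * system_refcount() masks the flag off and still reads 1.
 */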
61 /* Double loops, do not use break, only goto's work */
62 #define do_for_each_event_file(tr, file) \
63 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
64 list_for_each_entry(file, &tr->events, list)
66 #define do_for_each_event_file_safe(tr, file) \
67 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
68 struct ftrace_event_file *___n; \
69 list_for_each_entry_safe(file, ___n, &tr->events, list)
71 #define while_for_each_event_file() \
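/*
 * Usage sketch (illustrative only): the opening macro intentionally leaves
 * a brace unbalanced, so every walker must be closed with
 * while_for_each_event_file().  Because break only exits the inner loop,
 * leaving both loops early needs a goto:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call == some_call)
 *			goto found;
 *	} while_for_each_event_file();
 *    found:
 *	...
 */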
static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
    if (!event_call->class->get_fields)
        return &event_call->class->fields;
    return event_call->class->get_fields(event_call);
}
static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
    struct ftrace_event_field *field;

    list_for_each_entry(field, head, link) {
        if (!strcmp(field->name, name))
            return field;
    }

    return NULL;
}
struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
{
    struct ftrace_event_field *field;
    struct list_head *head;

    field = __find_event_field(&ftrace_common_fields, name);
    if (field)
        return field;

    head = trace_get_fields(call);
    return __find_event_field(head, name);
}
static int __trace_define_field(struct list_head *head, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
    struct ftrace_event_field *field;

    field = kmem_cache_alloc(field_cachep, GFP_TRACE);
    if (!field)
        return -ENOMEM;

    field->name = name;
    field->type = type;

    if (filter_type == FILTER_OTHER)
        field->filter_type = filter_assign_type(type);
    else
        field->filter_type = filter_type;

    field->offset = offset;
    field->size = size;
    field->is_signed = is_signed;

    list_add(&field->link, head);

    return 0;
}
int trace_define_field(struct ftrace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
    struct list_head *head;

    if (WARN_ON(!call->class))
        return 0;

    head = trace_get_fields(call);
    return __trace_define_field(head, type, name, offset, size,
                                is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
#define __common_field(type, item)					\
    ret = __trace_define_field(&ftrace_common_fields, #type,		\
                               "common_" #item,			\
                               offsetof(typeof(ent), item),		\
                               sizeof(ent.item),			\
                               is_signed_type(type), FILTER_OTHER);	\
    if (ret)							\
        return ret;

static int trace_define_common_fields(void)
{
    int ret;
    struct trace_entry ent;

    __common_field(unsigned short, type);
    __common_field(unsigned char, flags);
    __common_field(unsigned char, preempt_count);
    __common_field(int, pid);

    return ret;
}
static void trace_destroy_fields(struct ftrace_event_call *call)
{
    struct ftrace_event_field *field, *next;
    struct list_head *head;

    head = trace_get_fields(call);
    list_for_each_entry_safe(field, next, head, link) {
        list_del(&field->link);
        kmem_cache_free(field_cachep, field);
    }
}
int trace_event_raw_init(struct ftrace_event_call *call)
{
    int id;

    id = register_ftrace_event(&call->event);
    if (!id)
        return -ENODEV;

    return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
int ftrace_event_reg(struct ftrace_event_call *call,
                     enum trace_reg type, void *data)
{
    struct ftrace_event_file *file = data;

    switch (type) {
    case TRACE_REG_REGISTER:
        return tracepoint_probe_register(call->name,
                                         call->class->probe,
                                         file);
    case TRACE_REG_UNREGISTER:
        tracepoint_probe_unregister(call->name,
                                    call->class->probe,
                                    file);
        return 0;

#ifdef CONFIG_PERF_EVENTS
    case TRACE_REG_PERF_REGISTER:
        return tracepoint_probe_register(call->name,
                                         call->class->perf_probe,
                                         call);
    case TRACE_REG_PERF_UNREGISTER:
        tracepoint_probe_unregister(call->name,
                                    call->class->perf_probe,
                                    call);
        return 0;
    case TRACE_REG_PERF_OPEN:
    case TRACE_REG_PERF_CLOSE:
    case TRACE_REG_PERF_ADD:
    case TRACE_REG_PERF_DEL:
        return 0;
#endif
    }
    return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);
void trace_event_enable_cmd_record(bool enable)
{
    struct ftrace_event_file *file;
    struct trace_array *tr;

    mutex_lock(&event_mutex);
    do_for_each_event_file(tr, file) {

        if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
            continue;

        if (enable) {
            tracing_start_cmdline_record();
            set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
        } else {
            tracing_stop_cmdline_record();
            clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
        }
    } while_for_each_event_file();
    mutex_unlock(&event_mutex);
}
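/*
 * Rough summary of the helper below (illustrative, distilled from its own
 * comments, not an authoritative table):
 *
 *	enable=0, soft_disable=0: really unregister the event.
 *	enable=0, soft_disable=1: drop an sm_ref; on the last drop leave the
 *				  event registered but clear SOFT_MODE.
 *	enable=1, soft_disable=0: really enable, clearing SOFT_DISABLED.
 *	enable=1, soft_disable=1: register the tracepoint but keep the event
 *				  looking disabled via SOFT_DISABLED.
 */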
static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                                         int enable, int soft_disable)
{
    struct ftrace_event_call *call = file->event_call;
    int ret = 0;
    int disable;

    switch (enable) {
    case 0:
        /*
         * When soft_disable is set and enable is cleared, the sm_ref
         * reference counter is decremented. If it reaches 0, we want
         * to clear the SOFT_DISABLED flag but leave the event in the
         * state that it was. That is, if the event was enabled and
         * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
         * is set we do not want the event to be enabled before we
         * clear the bit.
         *
         * When soft_disable is not set but the SOFT_MODE flag is,
         * we do nothing. Do not disable the tracepoint, otherwise
         * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
         */
        if (soft_disable) {
            if (atomic_dec_return(&file->sm_ref) > 0)
                break;
            disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
            clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
        } else
            disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

        if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
            clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
            if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
                tracing_stop_cmdline_record();
                clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
            }
            call->class->reg(call, TRACE_REG_UNREGISTER, file);
        }
        /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
        if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
            set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
        break;
    case 1:
        /*
         * When soft_disable is set and enable is set, we want to
         * register the tracepoint for the event, but leave the event
         * as is. That means, if the event was already enabled, we do
         * nothing (but set SOFT_MODE). If the event is disabled, we
         * set SOFT_DISABLED before enabling the event tracepoint, so
         * it still seems to be disabled.
         */
        if (!soft_disable)
            clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
        else {
            if (atomic_inc_return(&file->sm_ref) > 1)
                break;
            set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
        }

        if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {

            /* Keep the event disabled, when going to SOFT_MODE. */
            if (soft_disable)
                set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

            if (trace_flags & TRACE_ITER_RECORD_CMD) {
                tracing_start_cmdline_record();
                set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
            }
            ret = call->class->reg(call, TRACE_REG_REGISTER, file);
            if (ret) {
                tracing_stop_cmdline_record();
                pr_info("event trace: Could not enable event "
                        "%s\n", call->name);
                break;
            }
            set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

            /* WAS_ENABLED gets set but never cleared. */
            call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
        }
        break;
    }

    return ret;
}
static int ftrace_event_enable_disable(struct ftrace_event_file *file,
                                       int enable)
{
    return __ftrace_event_enable_disable(file, enable, 0);
}
static void ftrace_clear_events(struct trace_array *tr)
{
    struct ftrace_event_file *file;

    mutex_lock(&event_mutex);
    list_for_each_entry(file, &tr->events, list) {
        ftrace_event_enable_disable(file, 0);
    }
    mutex_unlock(&event_mutex);
}
static void __put_system(struct event_subsystem *system)
{
    struct event_filter *filter = system->filter;

    WARN_ON_ONCE(system_refcount(system) == 0);
    if (system_refcount_dec(system))
        return;

    list_del(&system->list);

    if (filter) {
        kfree(filter->filter_string);
        kfree(filter);
    }
    if (system->ref_count & SYSTEM_FL_FREE_NAME)
        kfree(system->name);
    kfree(system);
}
static void __get_system(struct event_subsystem *system)
{
    WARN_ON_ONCE(system_refcount(system) == 0);
    system_refcount_inc(system);
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
    WARN_ON_ONCE(dir->ref_count == 0);
    dir->ref_count++;
    __get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
    WARN_ON_ONCE(dir->ref_count == 0);
    /* If the subsystem is about to be freed, the dir must be too */
    WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

    __put_system(dir->subsystem);
    if (!--dir->ref_count)
        kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
    mutex_lock(&event_mutex);
    __put_system_dir(dir);
    mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
                                  const char *sub, const char *event, int set)
{
    struct ftrace_event_file *file;
    struct ftrace_event_call *call;
    int ret = -EINVAL;

    mutex_lock(&event_mutex);
    list_for_each_entry(file, &tr->events, list) {

        call = file->event_call;

        if (!call->name || !call->class || !call->class->reg)
            continue;

        if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
            continue;

        if (match &&
            strcmp(match, call->name) != 0 &&
            strcmp(match, call->class->system) != 0)
            continue;

        if (sub && strcmp(sub, call->class->system) != 0)
            continue;

        if (event && strcmp(event, call->name) != 0)
            continue;

        ftrace_event_enable_disable(file, set);

        ret = 0;
    }
    mutex_unlock(&event_mutex);

    return ret;
}
static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
    char *event = NULL, *sub = NULL, *match;

    /*
     * The buf format can be <subsystem>:<event-name>
     *  *:<event-name> means any event by that name.
     *  :<event-name> is the same.
     *
     *  <subsystem>:* means all events in that subsystem
     *  <subsystem>: means the same.
     *
     *  <name> (no ':') means all events in a subsystem with
     *  the name <name> or any event that matches <name>
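     *
     * Examples (illustrative, added for clarity):
     *   "sched:sched_switch"  - one event in the sched subsystem
     *   "irq:" or "irq:*"     - every event in the irq subsystem
     *   "kmalloc"             - any subsystem or event named kmalloc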
     */

    match = strsep(&buf, ":");
    if (buf) {
        sub = match;
        event = buf;
        match = NULL;

        if (!strlen(sub) || strcmp(sub, "*") == 0)
            sub = NULL;
        if (!strlen(event) || strcmp(event, "*") == 0)
            event = NULL;
    }

    return __ftrace_set_clr_event(tr, match, sub, event, set);
}
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * a trace event.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
    struct trace_array *tr = top_trace_array();

    return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
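/*
 * Example use from other kernel code (illustrative only):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);   enable one event
 *	trace_set_clr_event("sched", NULL, 0);             disable the subsystem
 */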
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
    struct trace_parser parser;
    struct seq_file *m = file->private_data;
    struct trace_array *tr = m->private;
    ssize_t read, ret;

    if (!cnt)
        return 0;

    ret = tracing_update_buffers();
    if (ret < 0)
        return ret;

    if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
        return -ENOMEM;

    read = trace_get_user(&parser, ubuf, cnt, ppos);

    if (read >= 0 && trace_parser_loaded((&parser))) {
        int set = 1;

        if (*parser.buffer == '!')
            set = 0;

        parser.buffer[parser.idx] = 0;

        ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
        if (ret)
            goto out_put;
    }

    ret = read;

 out_put:
    trace_parser_put(&parser);

    return ret;
}
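/*
 * What the write handler above parses (illustrative): each token written to
 * the "set_event" file is handed to ftrace_set_clr_event(); a leading '!'
 * clears instead of sets, e.g.
 *
 *	echo 'sched:sched_switch' > set_event
 *	echo '!sched:*'          >> set_event
 */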
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
    struct ftrace_event_file *file = v;
    struct ftrace_event_call *call;
    struct trace_array *tr = m->private;

    (*pos)++;

    list_for_each_entry_continue(file, &tr->events, list) {
        call = file->event_call;
        /*
         * The ftrace subsystem is for showing formats only.
         * They can not be enabled or disabled via the event files.
         */
        if (call->class && call->class->reg)
            return file;
    }

    return NULL;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
    struct ftrace_event_file *file;
    struct trace_array *tr = m->private;
    loff_t l;

    mutex_lock(&event_mutex);

    file = list_entry(&tr->events, struct ftrace_event_file, list);
    for (l = 0; l <= *pos; ) {
        file = t_next(m, file, &l);
        if (!file)
            break;
    }
    return file;
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
    struct ftrace_event_file *file = v;
    struct trace_array *tr = m->private;

    (*pos)++;

    list_for_each_entry_continue(file, &tr->events, list) {
        if (file->flags & FTRACE_EVENT_FL_ENABLED)
            return file;
    }

    return NULL;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
    struct ftrace_event_file *file;
    struct trace_array *tr = m->private;
    loff_t l;

    mutex_lock(&event_mutex);

    file = list_entry(&tr->events, struct ftrace_event_file, list);
    for (l = 0; l <= *pos; ) {
        file = s_next(m, file, &l);
        if (!file)
            break;
    }
    return file;
}
static int t_show(struct seq_file *m, void *v)
{
    struct ftrace_event_file *file = v;
    struct ftrace_event_call *call = file->event_call;

    if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
        seq_printf(m, "%s:", call->class->system);
    seq_printf(m, "%s\n", call->name);

    return 0;
}
static void t_stop(struct seq_file *m, void *p)
{
    mutex_unlock(&event_mutex);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
    struct ftrace_event_file *file = filp->private_data;
    char buf[4] = "0";

    if (file->flags & FTRACE_EVENT_FL_ENABLED &&
        !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
        strcpy(buf, "1");

    if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
        file->flags & FTRACE_EVENT_FL_SOFT_MODE)
        strcat(buf, "*");

    strcat(buf, "\n");

    return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
    struct ftrace_event_file *file = filp->private_data;
    unsigned long val;
    int ret;

    if (!file)
        return -EINVAL;

    ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    if (ret)
        return ret;

    ret = tracing_update_buffers();
    if (ret < 0)
        return ret;

    switch (val) {
    case 0:
    case 1:
        mutex_lock(&event_mutex);
        ret = ftrace_event_enable_disable(file, val);
        mutex_unlock(&event_mutex);
        break;

    default:
        return -EINVAL;
    }

    *ppos += cnt;

    return ret ? ret : cnt;
}
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
    const char set_to_char[4] = { '?', '0', '1', 'X' };
    struct ftrace_subsystem_dir *dir = filp->private_data;
    struct event_subsystem *system = dir->subsystem;
    struct ftrace_event_call *call;
    struct ftrace_event_file *file;
    struct trace_array *tr = dir->tr;
    char buf[2];
    int set = 0;
    int ret;

    mutex_lock(&event_mutex);
    list_for_each_entry(file, &tr->events, list) {
        call = file->event_call;
        if (!call->name || !call->class || !call->class->reg)
            continue;

        if (system && strcmp(call->class->system, system->name) != 0)
            continue;

        /*
         * We need to find out if all the events are set,
         * or if all events are cleared, or if we have
         * a mixture.
         */
        set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

        /*
         * If we have a mixture, no need to look further.
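         *
         * (Illustrative note: bit 0 is set when some event is disabled and
         * bit 1 when some event is enabled, so set indexes set_to_char[] as
         * 0='?' (no events), 1='0' (all off), 2='1' (all on), 3='X' (mixed).)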
         */
        if (set == 3)
            break;
    }
    mutex_unlock(&event_mutex);

    buf[0] = set_to_char[set];
    buf[1] = '\n';

    ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

    return ret;
}
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
    struct ftrace_subsystem_dir *dir = filp->private_data;
    struct event_subsystem *system = dir->subsystem;
    const char *name = NULL;
    unsigned long val;
    ssize_t ret;

    ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    if (ret)
        return ret;

    ret = tracing_update_buffers();
    if (ret < 0)
        return ret;

    if (val != 0 && val != 1)
        return -EINVAL;

    /*
     * Opening of "enable" adds a ref count to system,
     * so the name is safe to use.
     */
    if (system)
        name = system->name;

    ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
    if (ret)
        return ret;

    *ppos += cnt;

    return cnt;
}
enum {
    FORMAT_HEADER		= 1,
    FORMAT_FIELD_SEPERATOR	= 2,
    FORMAT_PRINTFMT		= 3,
};
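/*
 * (Illustrative note: f_start()/f_next() below walk the "format" file in
 * stages.  The values above are cast to pointers and used as sentinels:
 * FORMAT_HEADER prints the name/ID lines, then the common fields are listed,
 * FORMAT_FIELD_SEPERATOR switches to the event's own fields, and
 * FORMAT_PRINTFMT emits the final "print fmt:" line.)
 */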
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
    struct ftrace_event_call *call = m->private;
    struct ftrace_event_field *field;
    struct list_head *common_head = &ftrace_common_fields;
    struct list_head *head = trace_get_fields(call);

    (*pos)++;

    switch ((unsigned long)v) {
    case FORMAT_HEADER:
        if (unlikely(list_empty(common_head)))
            return NULL;

        field = list_entry(common_head->prev,
                           struct ftrace_event_field, link);
        return field;

    case FORMAT_FIELD_SEPERATOR:
        if (unlikely(list_empty(head)))
            return NULL;

        field = list_entry(head->prev, struct ftrace_event_field, link);
        return field;

    case FORMAT_PRINTFMT:
        /* all done */
        return NULL;
    }

    field = v;
    if (field->link.prev == common_head)
        return (void *)FORMAT_FIELD_SEPERATOR;
    else if (field->link.prev == head)
        return (void *)FORMAT_PRINTFMT;

    field = list_entry(field->link.prev, struct ftrace_event_field, link);

    return field;
}
static void *f_start(struct seq_file *m, loff_t *pos)
{
    void *p = (void *)FORMAT_HEADER;
    loff_t l = 0;

    /* Start by showing the header */
    if (!*pos)
        return (void *)FORMAT_HEADER;

    p = (void *)FORMAT_HEADER;
    do {
        p = f_next(m, p, &l);
    } while (p && l < *pos);

    return p;
}
static int f_show(struct seq_file *m, void *v)
{
    struct ftrace_event_call *call = m->private;
    struct ftrace_event_field *field;
    const char *array_descriptor;

    switch ((unsigned long)v) {
    case FORMAT_HEADER:
        seq_printf(m, "name: %s\n", call->name);
        seq_printf(m, "ID: %d\n", call->event.type);
        seq_printf(m, "format:\n");
        return 0;

    case FORMAT_FIELD_SEPERATOR:
        seq_putc(m, '\n');
        return 0;

    case FORMAT_PRINTFMT:
        seq_printf(m, "\nprint fmt: %s\n",
                   call->print_fmt);
        return 0;
    }

    field = v;

    /*
     * Smartly shows the array type (except dynamic array).
     * Normal:
     *	field:TYPE VAR
     * If TYPE := TYPE[LEN], it is shown:
     *	field:TYPE VAR[LEN]
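     *
     * (Illustrative example of one resulting output line:
     *	field:char comm[16];	offset:8;	size:16;	signed:1;)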
     */
    array_descriptor = strchr(field->type, '[');

    if (!strncmp(field->type, "__data_loc", 10))
        array_descriptor = NULL;

    if (!array_descriptor)
        seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                   field->type, field->name, field->offset,
                   field->size, !!field->is_signed);
    else
        seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                   (int)(array_descriptor - field->type),
                   field->type, field->name,
                   array_descriptor, field->offset,
                   field->size, !!field->is_signed);

    return 0;
}
static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
    .start		= f_start,
    .next		= f_next,
    .stop		= f_stop,
    .show		= f_show,
};
static int trace_format_open(struct inode *inode, struct file *file)
{
    struct ftrace_event_call *call = inode->i_private;
    struct seq_file *m;
    int ret;

    ret = seq_open(file, &trace_format_seq_ops);
    if (ret < 0)
        return ret;

    m = file->private_data;
    m->private = call;

    return 0;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
    struct ftrace_event_call *call = filp->private_data;
    struct trace_seq *s;
    int r;

    if (*ppos)
        return 0;

    s = kmalloc(sizeof(*s), GFP_KERNEL);
    if (!s)
        return -ENOMEM;

    trace_seq_init(s);
    trace_seq_printf(s, "%d\n", call->event.type);

    r = simple_read_from_buffer(ubuf, cnt, ppos,
                                s->buffer, s->len);
    kfree(s);
    return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
    struct ftrace_event_call *call = filp->private_data;
    struct trace_seq *s;
    int r;

    if (*ppos)
        return 0;

    s = kmalloc(sizeof(*s), GFP_KERNEL);
    if (!s)
        return -ENOMEM;

    trace_seq_init(s);

    print_event_filter(call, s);
    r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

    kfree(s);

    return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
    struct ftrace_event_call *call = filp->private_data;
    char *buf;
    int err;

    if (cnt >= PAGE_SIZE)
        return -EINVAL;

    buf = (char *)__get_free_page(GFP_TEMPORARY);
    if (!buf)
        return -ENOMEM;

    if (copy_from_user(buf, ubuf, cnt)) {
        free_page((unsigned long) buf);
        return -EFAULT;
    }
    buf[cnt] = '\0';

    err = apply_event_filter(call, buf);
    free_page((unsigned long) buf);
    if (err < 0)
        return err;

    *ppos += cnt;

    return cnt;
}
static LIST_HEAD(event_subsystems);
static int subsystem_open(struct inode *inode, struct file *filp)
{
    struct event_subsystem *system = NULL;
    struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
    struct trace_array *tr;
    int ret;

    /* Make sure the system still exists */
    mutex_lock(&event_mutex);
    list_for_each_entry(tr, &ftrace_trace_arrays, list) {
        list_for_each_entry(dir, &tr->systems, list) {
            if (dir == inode->i_private) {
                /* Don't open systems with no events */
                if (dir->nr_events) {
                    __get_system_dir(dir);
                    system = dir->subsystem;
                }
                goto exit_loop;
            }
        }
    }
 exit_loop:
    mutex_unlock(&event_mutex);

    if (!system)
        return -ENODEV;

    /* Some versions of gcc think dir can be uninitialized here */
    WARN_ON(!dir);

    ret = tracing_open_generic(inode, filp);
    if (ret < 0)
        put_system(dir);

    return ret;
}
static int system_tr_open(struct inode *inode, struct file *filp)
{
    struct ftrace_subsystem_dir *dir;
    struct trace_array *tr = inode->i_private;
    int ret;

    /* Make a temporary dir that has no system but points to tr */
    dir = kzalloc(sizeof(*dir), GFP_KERNEL);
    if (!dir)
        return -ENOMEM;

    dir->tr = tr;

    ret = tracing_open_generic(inode, filp);
    if (ret < 0)
        kfree(dir);

    filp->private_data = dir;

    return ret;
}
static int subsystem_release(struct inode *inode, struct file *file)
{
    struct ftrace_subsystem_dir *dir = file->private_data;

    /*
     * If dir->subsystem is NULL, then this is a temporary
     * descriptor that was made for a trace_array to enable
     * all subsystems.
     */
    if (dir->subsystem)
        put_system(dir);
    else
        kfree(dir);

    return 0;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
    struct ftrace_subsystem_dir *dir = filp->private_data;
    struct event_subsystem *system = dir->subsystem;
    struct trace_seq *s;
    int r;

    if (*ppos)
        return 0;

    s = kmalloc(sizeof(*s), GFP_KERNEL);
    if (!s)
        return -ENOMEM;

    trace_seq_init(s);

    print_subsystem_event_filter(system, s);
    r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

    kfree(s);

    return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
    struct ftrace_subsystem_dir *dir = filp->private_data;
    char *buf;
    int err;

    if (cnt >= PAGE_SIZE)
        return -EINVAL;

    buf = (char *)__get_free_page(GFP_TEMPORARY);
    if (!buf)
        return -ENOMEM;

    if (copy_from_user(buf, ubuf, cnt)) {
        free_page((unsigned long) buf);
        return -EFAULT;
    }
    buf[cnt] = '\0';

    err = apply_subsystem_event_filter(dir, buf);
    free_page((unsigned long) buf);
    if (err < 0)
        return err;

    *ppos += cnt;

    return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
    int (*func)(struct trace_seq *s) = filp->private_data;
    struct trace_seq *s;
    int r;

    if (*ppos)
        return 0;

    s = kmalloc(sizeof(*s), GFP_KERNEL);
    if (!s)
        return -ENOMEM;

    trace_seq_init(s);

    func(s);
    r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

    kfree(s);

    return r;
}
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
    .start = t_start,
    .next = t_next,
    .show = t_show,
    .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
    .start = s_start,
    .next = s_next,
    .show = t_show,
    .stop = t_stop,
};
= {
1176 .open
= ftrace_event_avail_open
,
1178 .llseek
= seq_lseek
,
1179 .release
= seq_release
,
1182 static const struct file_operations ftrace_set_event_fops
= {
1183 .open
= ftrace_event_set_open
,
1185 .write
= ftrace_event_write
,
1186 .llseek
= seq_lseek
,
1187 .release
= seq_release
,
1190 static const struct file_operations ftrace_enable_fops
= {
1191 .open
= tracing_open_generic
,
1192 .read
= event_enable_read
,
1193 .write
= event_enable_write
,
1194 .llseek
= default_llseek
,
1197 static const struct file_operations ftrace_event_format_fops
= {
1198 .open
= trace_format_open
,
1200 .llseek
= seq_lseek
,
1201 .release
= seq_release
,
1204 static const struct file_operations ftrace_event_id_fops
= {
1205 .open
= tracing_open_generic
,
1206 .read
= event_id_read
,
1207 .llseek
= default_llseek
,
1210 static const struct file_operations ftrace_event_filter_fops
= {
1211 .open
= tracing_open_generic
,
1212 .read
= event_filter_read
,
1213 .write
= event_filter_write
,
1214 .llseek
= default_llseek
,
1217 static const struct file_operations ftrace_subsystem_filter_fops
= {
1218 .open
= subsystem_open
,
1219 .read
= subsystem_filter_read
,
1220 .write
= subsystem_filter_write
,
1221 .llseek
= default_llseek
,
1222 .release
= subsystem_release
,
1225 static const struct file_operations ftrace_system_enable_fops
= {
1226 .open
= subsystem_open
,
1227 .read
= system_enable_read
,
1228 .write
= system_enable_write
,
1229 .llseek
= default_llseek
,
1230 .release
= subsystem_release
,
1233 static const struct file_operations ftrace_tr_enable_fops
= {
1234 .open
= system_tr_open
,
1235 .read
= system_enable_read
,
1236 .write
= system_enable_write
,
1237 .llseek
= default_llseek
,
1238 .release
= subsystem_release
,
1241 static const struct file_operations ftrace_show_header_fops
= {
1242 .open
= tracing_open_generic
,
1243 .read
= show_header
,
1244 .llseek
= default_llseek
,
static int
ftrace_event_open(struct inode *inode, struct file *file,
                  const struct seq_operations *seq_ops)
{
    struct seq_file *m;
    int ret;

    ret = seq_open(file, seq_ops);
    if (ret < 0)
        return ret;
    m = file->private_data;
    /* copy tr over to seq ops */
    m->private = inode->i_private;

    return ret;
}
static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
    const struct seq_operations *seq_ops = &show_event_seq_ops;

    return ftrace_event_open(inode, file, seq_ops);
}
static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
    const struct seq_operations *seq_ops = &show_set_event_seq_ops;
    struct trace_array *tr = inode->i_private;

    if ((file->f_mode & FMODE_WRITE) &&
        (file->f_flags & O_TRUNC))
        ftrace_clear_events(tr);

    return ftrace_event_open(inode, file, seq_ops);
}
static struct event_subsystem *
create_new_subsystem(const char *name)
{
    struct event_subsystem *system;

    /* need to create new entry */
    system = kmalloc(sizeof(*system), GFP_KERNEL);
    if (!system)
        return NULL;

    system->ref_count = 1;

    /* Only allocate if dynamic (kprobes and modules) */
    if (!core_kernel_data((unsigned long)name)) {
        system->ref_count |= SYSTEM_FL_FREE_NAME;
        system->name = kstrdup(name, GFP_KERNEL);
        if (!system->name)
            goto out_free;
    } else
        system->name = name;

    system->filter = NULL;

    system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
    if (!system->filter)
        goto out_free;

    list_add(&system->list, &event_subsystems);

    return system;

 out_free:
    if (system->ref_count & SYSTEM_FL_FREE_NAME)
        kfree(system->name);
    kfree(system);
    return NULL;
}
static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
                    struct ftrace_event_file *file, struct dentry *parent)
{
    struct ftrace_subsystem_dir *dir;
    struct event_subsystem *system;
    struct dentry *entry;

    /* First see if we did not already create this dir */
    list_for_each_entry(dir, &tr->systems, list) {
        system = dir->subsystem;
        if (strcmp(system->name, name) == 0) {
            dir->nr_events++;
            file->system = dir;
            return dir->entry;
        }
    }

    /* Now see if the system itself exists. */
    list_for_each_entry(system, &event_subsystems, list) {
        if (strcmp(system->name, name) == 0)
            break;
    }
    /* Reset system variable when not found */
    if (&system->list == &event_subsystems)
        system = NULL;

    dir = kmalloc(sizeof(*dir), GFP_KERNEL);
    if (!dir)
        goto out_fail;

    if (!system) {
        system = create_new_subsystem(name);
        if (!system)
            goto out_free;
    } else
        __get_system(system);

    dir->entry = debugfs_create_dir(name, parent);
    if (!dir->entry) {
        pr_warning("Failed to create system directory %s\n", name);
        __put_system(system);
        goto out_free;
    }

    dir->tr = tr;
    dir->ref_count = 1;
    dir->nr_events = 1;
    dir->subsystem = system;
    file->system = dir;

    entry = debugfs_create_file("filter", 0644, dir->entry, dir,
                                &ftrace_subsystem_filter_fops);
    if (!entry) {
        kfree(system->filter);
        system->filter = NULL;
        pr_warning("Could not create debugfs '%s/filter' entry\n", name);
    }

    trace_create_file("enable", 0644, dir->entry, dir,
                      &ftrace_system_enable_fops);

    list_add(&dir->list, &tr->systems);

    return dir->entry;

 out_free:
    kfree(dir);
 out_fail:
    /* Only print this message if failed on memory allocation */
    if (!dir || !system)
        pr_warning("No memory to create event subsystem %s\n",
                   name);
    return NULL;
}
static int
event_create_dir(struct dentry *parent,
                 struct ftrace_event_file *file,
                 const struct file_operations *id,
                 const struct file_operations *enable,
                 const struct file_operations *filter,
                 const struct file_operations *format)
{
    struct ftrace_event_call *call = file->event_call;
    struct trace_array *tr = file->tr;
    struct list_head *head;
    struct dentry *d_events;
    int ret;

    /*
     * If the trace point header did not define TRACE_SYSTEM
     * then the system would be called "TRACE_SYSTEM".
     */
    if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
        d_events = event_subsystem_dir(tr, call->class->system, file, parent);
        if (!d_events)
            return -ENOMEM;
    } else
        d_events = parent;

    file->dir = debugfs_create_dir(call->name, d_events);
    if (!file->dir) {
        pr_warning("Could not create debugfs '%s' directory\n",
                   call->name);
        return -1;
    }

    if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
        trace_create_file("enable", 0644, file->dir, file,
                          enable);

#ifdef CONFIG_PERF_EVENTS
    if (call->event.type && call->class->reg)
        trace_create_file("id", 0444, file->dir, call,
                          id);
#endif

    /*
     * Other events may have the same class. Only update
     * the fields if they are not already defined.
     */
    head = trace_get_fields(call);
    if (list_empty(head)) {
        ret = call->class->define_fields(call);
        if (ret < 0) {
            pr_warning("Could not initialize trace point"
                       " events/%s\n", call->name);
            return -1;
        }
    }
    trace_create_file("filter", 0644, file->dir, call,
                      filter);

    trace_create_file("format", 0444, file->dir, call,
                      format);

    return 0;
}
static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
    if (!dir)
        return;

    if (!--dir->nr_events) {
        debugfs_remove_recursive(dir->entry);
        list_del(&dir->list);
        __put_system_dir(dir);
    }
}
static void remove_event_from_tracers(struct ftrace_event_call *call)
{
    struct ftrace_event_file *file;
    struct trace_array *tr;

    do_for_each_event_file_safe(tr, file) {

        if (file->event_call != call)
            continue;

        list_del(&file->list);
        debugfs_remove_recursive(file->dir);
        remove_subsystem(file->system);
        kmem_cache_free(file_cachep, file);

        /*
         * The do_for_each_event_file_safe() is
         * a double loop. After finding the call for this
         * trace_array, we use break to jump to the next
         * trace_array.
         */
        break;
    } while_for_each_event_file();
}
static void event_remove(struct ftrace_event_call *call)
{
    struct trace_array *tr;
    struct ftrace_event_file *file;

    do_for_each_event_file(tr, file) {
        if (file->event_call != call)
            continue;
        ftrace_event_enable_disable(file, 0);
        /*
         * The do_for_each_event_file() is
         * a double loop. After finding the call for this
         * trace_array, we use break to jump to the next
         * trace_array.
         */
        break;
    } while_for_each_event_file();

    if (call->event.funcs)
        __unregister_ftrace_event(&call->event);
    remove_event_from_tracers(call);
    list_del(&call->list);
}
static int event_init(struct ftrace_event_call *call)
{
    int ret = 0;

    if (WARN_ON(!call->name))
        return -EINVAL;

    if (call->class->raw_init) {
        ret = call->class->raw_init(call);
        if (ret < 0 && ret != -ENOSYS)
            pr_warn("Could not initialize trace events/%s\n",
                    call->name);
    }

    return ret;
}
static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
    int ret;

    ret = event_init(call);
    if (ret < 0)
        return ret;

    list_add(&call->list, &ftrace_events);
    call->mod = mod;

    return 0;
}
static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
                       struct trace_array *tr)
{
    struct ftrace_event_file *file;

    file = kmem_cache_alloc(file_cachep, GFP_TRACE);
    if (!file)
        return NULL;

    file->event_call = call;
    file->tr = tr;
    atomic_set(&file->sm_ref, 0);
    list_add(&file->list, &tr->events);

    return file;
}
/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call,
                      struct trace_array *tr,
                      const struct file_operations *id,
                      const struct file_operations *enable,
                      const struct file_operations *filter,
                      const struct file_operations *format)
{
    struct ftrace_event_file *file;

    file = trace_create_new_event(call, tr);
    if (!file)
        return -ENOMEM;

    return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}
/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static int
__trace_early_add_new_event(struct ftrace_event_call *call,
                            struct trace_array *tr)
{
    struct ftrace_event_file *file;

    file = trace_create_new_event(call, tr);
    if (!file)
        return -ENOMEM;

    return 0;
}
struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call,
                                   struct ftrace_module_file_ops *file_ops);
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
    int ret;

    mutex_lock(&event_mutex);

    ret = __register_event(call, NULL);
    if (ret >= 0)
        __add_event_to_tracers(call, NULL);

    mutex_unlock(&event_mutex);
    return ret;
}
/*
 * Must be called under locking both of event_mutex and trace_event_sem.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
    event_remove(call);
    trace_destroy_fields(call);
    destroy_preds(call);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
    mutex_lock(&event_mutex);
    down_write(&trace_event_sem);
    __trace_remove_event_call(call);
    up_write(&trace_event_sem);
    mutex_unlock(&event_mutex);
}
#define for_each_event(event, start, end)			\
    for (event = start;						\
         (unsigned long)event < (unsigned long)end;		\
         event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);
/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
    struct list_head		list;
    struct module			*mod;
    struct file_operations		id;
    struct file_operations		enable;
    struct file_operations		format;
    struct file_operations		filter;
};
static struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
    /*
     * As event_calls are added in groups by module,
     * when we find one file_ops, we don't need to search for
     * each call in that module, as the rest should be the
     * same. Only search for a new one if the last one did
     * not match.
     */
    if (file_ops && mod == file_ops->mod)
        return file_ops;

    list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
        if (file_ops->mod == mod)
            return file_ops;
    }
    return NULL;
}
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
    struct ftrace_module_file_ops *file_ops;

    /*
     * This is a bit of a PITA. To allow for correct reference
     * counting, modules must "own" their file_operations.
     * To do this, we allocate the file operations that will be
     * used in the event directory.
     */

    file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
    if (!file_ops)
        return NULL;

    file_ops->mod = mod;

    file_ops->id = ftrace_event_id_fops;
    file_ops->id.owner = mod;

    file_ops->enable = ftrace_enable_fops;
    file_ops->enable.owner = mod;

    file_ops->filter = ftrace_event_filter_fops;
    file_ops->filter.owner = mod;

    file_ops->format = ftrace_event_format_fops;
    file_ops->format.owner = mod;

    list_add(&file_ops->list, &ftrace_module_file_list);

    return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
    struct ftrace_module_file_ops *file_ops = NULL;
    struct ftrace_event_call **call, **start, **end;

    start = mod->trace_events;
    end = mod->trace_events + mod->num_trace_events;

    if (start == end)
        return;

    file_ops = trace_create_file_ops(mod);
    if (!file_ops)
        return;

    for_each_event(call, start, end) {
        __register_event(*call, mod);
        __add_event_to_tracers(*call, file_ops);
    }
}
static void trace_module_remove_events(struct module *mod)
{
    struct ftrace_module_file_ops *file_ops;
    struct ftrace_event_call *call, *p;
    bool clear_trace = false;

    down_write(&trace_event_sem);
    list_for_each_entry_safe(call, p, &ftrace_events, list) {
        if (call->mod == mod) {
            if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
                clear_trace = true;
            __trace_remove_event_call(call);
        }
    }

    /* Now free the file_operations */
    list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
        if (file_ops->mod == mod)
            break;
    }
    if (&file_ops->list != &ftrace_module_file_list) {
        list_del(&file_ops->list);
        kfree(file_ops);
    }
    up_write(&trace_event_sem);

    /*
     * It is safest to reset the ring buffer if the module being unloaded
     * registered any events that were used. The only worry is if
     * a new module gets loaded, and takes on the same id as the events
     * of this module. When printing out the buffer, traced events left
     * over from this module may be passed to the new module events and
     * unexpected results may occur.
     */
    if (clear_trace)
        tracing_reset_all_online_cpus();
}
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
    struct module *mod = data;

    mutex_lock(&event_mutex);
    switch (val) {
    case MODULE_STATE_COMING:
        trace_module_add_events(mod);
        break;
    case MODULE_STATE_GOING:
        trace_module_remove_events(mod);
        break;
    }
    mutex_unlock(&event_mutex);

    return 0;
}
static int
__trace_add_new_mod_event(struct ftrace_event_call *call,
                          struct trace_array *tr,
                          struct ftrace_module_file_ops *file_ops)
{
    return __trace_add_new_event(call, tr,
                                 &file_ops->id, &file_ops->enable,
                                 &file_ops->filter, &file_ops->format);
}
#else
static inline struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
    return NULL;
}
static inline int trace_module_notify(struct notifier_block *self,
                                      unsigned long val, void *data)
{
    return 0;
}
static inline int
__trace_add_new_mod_event(struct ftrace_event_call *call,
                          struct trace_array *tr,
                          struct ftrace_module_file_ops *file_ops)
{
    return -ENODEV;
}
#endif /* CONFIG_MODULES */
/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
    struct ftrace_module_file_ops *file_ops = NULL;
    struct ftrace_event_call *call;
    int ret;

    list_for_each_entry(call, &ftrace_events, list) {
        if (call->mod) {
            /*
             * Directories for events by modules need to
             * keep module ref counts when opened (as we don't
             * want the module to disappear when reading one
             * of these files). The file_ops keep account of
             * the module ref count.
             */
            file_ops = find_ftrace_file_ops(file_ops, call->mod);
            if (!file_ops)
                continue; /* Warn? */
            ret = __trace_add_new_mod_event(call, tr, file_ops);
            if (ret < 0)
                pr_warning("Could not create directory for event %s\n",
                           call->name);
            continue;
        }
        ret = __trace_add_new_event(call, tr,
                                    &ftrace_event_id_fops,
                                    &ftrace_enable_fops,
                                    &ftrace_event_filter_fops,
                                    &ftrace_event_format_fops);
        if (ret < 0)
            pr_warning("Could not create directory for event %s\n",
                       call->name);
    }
}
#ifdef CONFIG_DYNAMIC_FTRACE

#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
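/*
 * Illustrative usage of these function-probe commands (assuming the usual
 * debugfs tracing layout; this example is not part of the original file):
 *
 *	echo 'schedule:enable_event:sched:sched_switch:1' > set_ftrace_filter
 *
 * soft-enables sched:sched_switch the first time schedule() is hit, using
 * the <function>:<command>:<system>:<event>[:<count>] form that
 * event_enable_func() below parses.
 */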
struct event_probe_data {
    struct ftrace_event_file	*file;
    unsigned long			count;
    int				ref;
    bool				enable;
};
static struct ftrace_event_file *
find_event_file(struct trace_array *tr, const char *system,  const char *event)
{
    struct ftrace_event_file *file;
    struct ftrace_event_call *call;

    list_for_each_entry(file, &tr->events, list) {

        call = file->event_call;

        if (!call->name || !call->class || !call->class->reg)
            continue;

        if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
            continue;

        if (strcmp(event, call->name) == 0 &&
            strcmp(system, call->class->system) == 0)
            return file;
    }
    return NULL;
}
static void
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
    struct event_probe_data **pdata = (struct event_probe_data **)_data;
    struct event_probe_data *data = *pdata;

    if (!data)
        return;

    if (data->enable)
        clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
    else
        set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
}
static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
    struct event_probe_data **pdata = (struct event_probe_data **)_data;
    struct event_probe_data *data = *pdata;

    if (!data)
        return;

    if (!data->count)
        return;

    /* Skip if the event is in a state we want to switch to */
    if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
        return;

    if (data->count != -1)
        (data->count)--;

    event_enable_probe(ip, parent_ip, _data);
}
static int
event_enable_print(struct seq_file *m, unsigned long ip,
                   struct ftrace_probe_ops *ops, void *_data)
{
    struct event_probe_data *data = _data;

    seq_printf(m, "%ps:", (void *)ip);

    seq_printf(m, "%s:%s:%s",
               data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
               data->file->event_call->class->system,
               data->file->event_call->name);

    if (data->count == -1)
        seq_printf(m, ":unlimited\n");
    else
        seq_printf(m, ":count=%ld\n", data->count);

    return 0;
}
static int
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
                  void **_data)
{
    struct event_probe_data **pdata = (struct event_probe_data **)_data;
    struct event_probe_data *data = *pdata;

    data->ref++;
    return 0;
}
static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
                  void **_data)
{
    struct event_probe_data **pdata = (struct event_probe_data **)_data;
    struct event_probe_data *data = *pdata;

    if (WARN_ON_ONCE(data->ref <= 0))
        return;

    data->ref--;
    if (!data->ref) {
        /* Remove the SOFT_MODE flag */
        __ftrace_event_enable_disable(data->file, 0, 1);
        module_put(data->file->event_call->mod);
        kfree(data);
    }
    *pdata = NULL;
}
static struct ftrace_probe_ops event_enable_probe_ops = {
    .func			= event_enable_probe,
    .print			= event_enable_print,
    .init			= event_enable_init,
    .free			= event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
    .func			= event_enable_count_probe,
    .print			= event_enable_print,
    .init			= event_enable_init,
    .free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
    .func			= event_enable_probe,
    .print			= event_enable_print,
    .init			= event_enable_init,
    .free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
    .func			= event_enable_count_probe,
    .print			= event_enable_print,
    .init			= event_enable_init,
    .free			= event_enable_free,
};
static int
event_enable_func(struct ftrace_hash *hash,
                  char *glob, char *cmd, char *param, int enabled)
{
    struct trace_array *tr = top_trace_array();
    struct ftrace_event_file *file;
    struct ftrace_probe_ops *ops;
    struct event_probe_data *data;
    const char *system;
    const char *event;
    char *number;
    bool enable;
    int ret;

    /* hash funcs only work with set_ftrace_filter */
    if (!enabled || !param)
        return -EINVAL;

    system = strsep(&param, ":");
    if (!param)
        return -EINVAL;

    event = strsep(&param, ":");

    mutex_lock(&event_mutex);

    ret = -EINVAL;
    file = find_event_file(tr, system, event);
    if (!file)
        goto out;

    enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

    if (enable)
        ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
    else
        ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

    if (glob[0] == '!') {
        unregister_ftrace_function_probe_func(glob+1, ops);
        ret = 0;
        goto out;
    }

    ret = -ENOMEM;
    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        goto out;

    data->enable = enable;
    data->count = -1;
    data->file = file;

    if (!param)
        goto out_reg;

    number = strsep(&param, ":");

    ret = -EINVAL;
    if (!strlen(number))
        goto out_free;

    /*
     * We use the callback data field (which is a pointer)
     * as our counter.
     */
    ret = kstrtoul(number, 0, &data->count);
    if (ret)
        goto out_free;

 out_reg:
    /* Don't let event modules unload while probe registered */
    ret = try_module_get(file->event_call->mod);
    if (!ret) {
        ret = -EBUSY;
        goto out_free;
    }

    ret = __ftrace_event_enable_disable(file, 1, 1);
    if (ret < 0)
        goto out_put;
    ret = register_ftrace_function_probe(glob, ops, data);
    /*
     * The above returns on success the # of functions enabled,
     * but if it didn't find any functions it returns zero.
     * Consider no functions a failure too.
     */
    if (!ret) {
        ret = -ENOENT;
        goto out_disable;
    } else if (ret < 0)
        goto out_disable;
    /* Just return zero, not the number of enabled functions */
    ret = 0;
 out:
    mutex_unlock(&event_mutex);
    return ret;

 out_disable:
    __ftrace_event_enable_disable(file, 0, 1);
 out_put:
    module_put(file->event_call->mod);
 out_free:
    kfree(data);
    goto out;
}
static struct ftrace_func_command event_enable_cmd = {
    .name			= ENABLE_EVENT_STR,
    .func			= event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
    .name			= DISABLE_EVENT_STR,
    .func			= event_enable_func,
};

static __init int register_event_cmds(void)
{
    int ret;

    ret = register_ftrace_command(&event_enable_cmd);
    if (WARN_ON(ret < 0))
        return ret;
    ret = register_ftrace_command(&event_disable_cmd);
    if (WARN_ON(ret < 0))
        unregister_ftrace_command(&event_enable_cmd);
    return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * The top level array has already had its ftrace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the debugfs has been
 * initialized, and we now have to create the files associated
 * to the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
    struct ftrace_event_file *file;
    int ret;

    list_for_each_entry(file, &tr->events, list) {
        ret = event_create_dir(tr->event_dir, file,
                               &ftrace_event_id_fops,
                               &ftrace_enable_fops,
                               &ftrace_event_filter_fops,
                               &ftrace_event_format_fops);
        if (ret < 0)
            pr_warning("Could not create directory for event %s\n",
                       file->event_call->name);
    }
}
/*
 * For early boot up, the top trace array needs to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
    struct ftrace_event_call *call;
    int ret;

    list_for_each_entry(call, &ftrace_events, list) {
        /* Early boot up should not have any modules loaded */
        if (WARN_ON_ONCE(call->mod))
            continue;

        ret = __trace_early_add_new_event(call, tr);
        if (ret < 0)
            pr_warning("Could not create early event %s\n",
                       call->name);
    }
}
2208 __trace_remove_event_dirs(struct trace_array
*tr
)
2210 struct ftrace_event_file
*file
, *next
;
2212 list_for_each_entry_safe(file
, next
, &tr
->events
, list
) {
2213 list_del(&file
->list
);
2214 debugfs_remove_recursive(file
->dir
);
2215 remove_subsystem(file
->system
);
2216 kmem_cache_free(file_cachep
, file
);
static void
__add_event_to_tracers(struct ftrace_event_call *call,
                       struct ftrace_module_file_ops *file_ops)
{
    struct trace_array *tr;

    list_for_each_entry(tr, &ftrace_trace_arrays, list) {
        if (file_ops)
            __trace_add_new_mod_event(call, tr, file_ops);
        else
            __trace_add_new_event(call, tr,
                                  &ftrace_event_id_fops,
                                  &ftrace_enable_fops,
                                  &ftrace_event_filter_fops,
                                  &ftrace_event_format_fops);
    }
}
static struct notifier_block trace_module_nb = {
    .notifier_call = trace_module_notify,
    .priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
    strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
    ring_buffer_expanded = true;
    tracing_selftest_disabled = true;

    return 1;
}
__setup("trace_event=", setup_trace_event);
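/*
 * Boot-time usage (illustrative): passing e.g.
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * on the kernel command line stashes the list here so that
 * event_trace_enable() can turn the events on before debugfs exists.
 */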
/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
    struct dentry *d_events;
    struct dentry *entry;

    entry = debugfs_create_file("set_event", 0644, parent,
                                tr, &ftrace_set_event_fops);
    if (!entry) {
        pr_warning("Could not create debugfs 'set_event' entry\n");
        return -ENOMEM;
    }

    d_events = debugfs_create_dir("events", parent);
    if (!d_events) {
        pr_warning("Could not create debugfs 'events' directory\n");
        return -ENOMEM;
    }

    /* ring buffer internal formats */
    trace_create_file("header_page", 0444, d_events,
                      ring_buffer_print_page_header,
                      &ftrace_show_header_fops);

    trace_create_file("header_event", 0444, d_events,
                      ring_buffer_print_entry_header,
                      &ftrace_show_header_fops);

    trace_create_file("enable", 0644, d_events,
                      tr, &ftrace_tr_enable_fops);

    tr->event_dir = d_events;

    return 0;
}
/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
    int ret;

    mutex_lock(&event_mutex);

    ret = create_event_toplevel_files(parent, tr);
    if (ret)
        goto out_unlock;

    down_write(&trace_event_sem);
    __trace_add_event_dirs(tr);
    up_write(&trace_event_sem);

 out_unlock:
    mutex_unlock(&event_mutex);

    return ret;
}
/*
 * The top trace array already had its file descriptors created.
 * Now the files themselves need to be created.
 */
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
    int ret;

    mutex_lock(&event_mutex);

    ret = create_event_toplevel_files(parent, tr);
    if (ret)
        goto out_unlock;

    down_write(&trace_event_sem);
    __trace_early_add_event_dirs(tr);
    up_write(&trace_event_sem);

 out_unlock:
    mutex_unlock(&event_mutex);

    return ret;
}
int event_trace_del_tracer(struct trace_array *tr)
{
    /* Disable any running events */
    __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);

    mutex_lock(&event_mutex);

    down_write(&trace_event_sem);
    __trace_remove_event_dirs(tr);
    debugfs_remove_recursive(tr->event_dir);
    up_write(&trace_event_sem);

    tr->event_dir = NULL;

    mutex_unlock(&event_mutex);

    return 0;
}
static __init int event_trace_memsetup(void)
{
    field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
    file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
    return 0;
}
static __init int event_trace_enable(void)
{
    struct trace_array *tr = top_trace_array();
    struct ftrace_event_call **iter, *call;
    char *buf = bootup_event_buf;
    char *token;
    int ret;

    for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

        call = *iter;
        ret = event_init(call);
        if (!ret)
            list_add(&call->list, &ftrace_events);
    }

    /*
     * We need the top trace array to have a working set of trace
     * points at early init, before the debug files and directories
     * are created. Create the file entries now, and attach them
     * to the actual file dentries later.
     */
    __trace_early_add_events(tr);

    while (true) {
        token = strsep(&buf, ",");

        if (!token)
            break;
        if (!*token)
            continue;

        ret = ftrace_set_clr_event(tr, token, 1);
        if (ret)
            pr_warn("Failed to enable trace event: %s\n", token);
    }

    trace_printk_start_comm();

    register_event_cmds();

    return 0;
}
static __init int event_trace_init(void)
{
    struct trace_array *tr;
    struct dentry *d_tracer;
    struct dentry *entry;
    int ret;

    tr = top_trace_array();

    d_tracer = tracing_init_dentry();
    if (!d_tracer)
        return 0;

    entry = debugfs_create_file("available_events", 0444, d_tracer,
                                tr, &ftrace_avail_fops);
    if (!entry)
        pr_warning("Could not create debugfs "
                   "'available_events' entry\n");

    if (trace_define_common_fields())
        pr_warning("tracing: Failed to allocate common fields");

    ret = early_event_add_tracer(d_tracer, tr);
    if (ret)
        return ret;

    ret = register_module_notifier(&trace_module_nb);
    if (ret)
        pr_warning("Failed to register trace events module notifier\n");

    return 0;
}
early_initcall(event_trace_memsetup);
core_initcall(event_trace_enable);
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
    spin_lock(&test_spinlock);
    spin_lock_irq(&test_spinlock_irq);
    udelay(1);
    spin_unlock_irq(&test_spinlock_irq);
    spin_unlock(&test_spinlock);

    mutex_lock(&test_mutex);
    msleep(1);
    mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
    void *test_malloc;

    test_malloc = kmalloc(1234, GFP_KERNEL);
    if (!test_malloc)
        pr_info("failed to kmalloc\n");

    schedule_on_each_cpu(test_work);

    kfree(test_malloc);

    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop())
        schedule();

    return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
    struct task_struct *test_thread;

    test_thread = kthread_run(event_test_thread, NULL, "test-events");
    msleep(100);
    kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
    struct ftrace_subsystem_dir *dir;
    struct ftrace_event_file *file;
    struct ftrace_event_call *call;
    struct event_subsystem *system;
    struct trace_array *tr;
    int ret;

    tr = top_trace_array();

    pr_info("Running tests on trace events:\n");

    list_for_each_entry(file, &tr->events, list) {

        call = file->event_call;

        /* Only test those that have a probe */
        if (!call->class || !call->class->probe)
            continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
        if (call->class->system &&
            strcmp(call->class->system, "syscalls") == 0)
            continue;
#endif

        pr_info("Testing event %s: ", call->name);

        /*
         * If an event is already enabled, someone is using
         * it and the self test should not be on.
         */
        if (file->flags & FTRACE_EVENT_FL_ENABLED) {
            pr_warning("Enabled event during self test!\n");
            WARN_ON_ONCE(1);
            continue;
        }

        ftrace_event_enable_disable(file, 1);
        event_test_stuff();
        ftrace_event_enable_disable(file, 0);

        pr_cont("OK\n");
    }
    /* Now test at the sub system level */

    pr_info("Running tests on trace event systems:\n");

    list_for_each_entry(dir, &tr->systems, list) {

        system = dir->subsystem;

        /* the ftrace system is special, skip it */
        if (strcmp(system->name, "ftrace") == 0)
            continue;

        pr_info("Testing event system %s: ", system->name);

        ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
            pr_warning("error enabling system %s\n",
                       system->name);
            continue;
        }

        event_test_stuff();

        ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
            pr_warning("error disabling system %s\n",
                       system->name);
            continue;
        }

        pr_cont("OK\n");
    }
    /* Test with all events enabled */

    pr_info("Running tests on all trace events:\n");
    pr_info("Testing all events: ");

    ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
    if (WARN_ON_ONCE(ret)) {
        pr_warning("error enabling all events\n");
        return;
    }

    event_test_stuff();

    /* reset sysname */
    ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
    if (WARN_ON_ONCE(ret)) {
        pr_warning("error disabling all events\n");
        return;
    }

    pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
    struct ring_buffer_event *event;
    struct ring_buffer *buffer;
    struct ftrace_entry *entry;
    unsigned long flags;
    long disabled;
    int cpu;
    int pc;

    pc = preempt_count();
    preempt_disable_notrace();
    cpu = raw_smp_processor_id();
    disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

    if (disabled != 1)
        goto out;

    local_save_flags(flags);

    event = trace_current_buffer_lock_reserve(&buffer,
                                              TRACE_FN, sizeof(*entry),
                                              flags, pc);
    if (!event)
        goto out;
    entry = ring_buffer_event_data(event);
    entry->ip = ip;
    entry->parent_ip = parent_ip;

    trace_buffer_unlock_commit(buffer, event, flags, pc);

 out:
    atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
    preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __initdata =
{
    .func = function_test_events_call,
    .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static __init void event_trace_self_test_with_function(void)
{
    int ret;

    ret = register_ftrace_function(&trace_ops);
    if (WARN_ON(ret < 0)) {
        pr_info("Failed to enable function tracer for event tests\n");
        return;
    }
    pr_info("Running tests again, along with the function tracer\n");
    event_trace_self_tests();
    unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
    if (!tracing_selftest_disabled) {
        event_trace_self_tests();
        event_trace_self_test_with_function();
    }

    return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */