/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;
/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
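/*
 * Sketch of the intended pairing (see trace_event_enable_cmd_record()
 * below for a real user); the closing brace of the outer loop comes
 * from while_for_each_event_file():
 *
 *	do_for_each_event_file(tr, file) {
 *		...
 *	} while_for_each_event_file();
 */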
static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}
struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}
static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		goto err;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	kmem_cache_free(field_cachep, field);

	return -ENOMEM;
}
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
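/*
 * Illustrative call only (struct my_entry and its "val" member are
 * hypothetical); this is the sort of thing a ->define_fields()
 * callback does for each member of the trace record:
 *
 *	ret = trace_define_field(call, "int", "val",
 *				 offsetof(struct my_entry, val),
 *				 sizeof(int), is_signed_type(int),
 *				 FILTER_OTHER);
 */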
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}
static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}
int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);
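/*
 * Event classes typically point their ->reg() callback at the helper
 * above rather than rolling their own; a minimal sketch (the class
 * name is hypothetical):
 *
 *	static struct ftrace_event_class my_event_class = {
 *		...
 *		.reg = ftrace_event_reg,
 *	};
 */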
void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}
static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
					 int enable, int soft_disable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	return ret;
}
static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}
static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}
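/*
 * From user space this is driven through the "set_event" file
 * (illustrative paths; a leading '!', handled by ftrace_event_write()
 * below, clears instead of sets):
 *
 *	echo 'sched:sched_switch' > /sys/kernel/debug/tracing/set_event
 *	echo 'sched:*'            > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:*'           > /sys/kernel/debug/tracing/set_event
 */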
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
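/*
 * In-kernel usage sketch, e.g. to turn on every "sched" event:
 *
 *	if (trace_set_clr_event("sched", NULL, 1))
 *		pr_warn("no sched events registered\n");
 */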
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}

	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}

	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	char *buf;

	if (file->flags & FTRACE_EVENT_FL_ENABLED) {
		if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
			buf = "0*\n";
		else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
			buf = "1*\n";
		else
			buf = "1\n";
	} else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	unsigned long val;
	int ret;

	if (!file)
		return -EINVAL;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
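/*
 * Per-event "enable" file usage (illustrative path). Reads show a
 * trailing '*' when the event is in soft mode, per the read side above:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 */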
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
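/*
 * So a subsystem "enable" file reads back '0' (all events disabled),
 * '1' (all enabled), or 'X' (a mixture); '?' would mean no matching
 * events were found.
 */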
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}
static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
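/*
 * Which yields "format" file output shaped like this (abridged, field
 * offsets/sizes illustrative):
 *
 *	name: sched_switch
 *	ID: 123
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *		field:char prev_comm[16];	offset:8;	size:16;	signed:1;
 *		...
 *
 *	print fmt: "prev_comm=%s ...", ...
 */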
static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
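/*
 * Filter expressions are plain text over the event's fields, e.g.
 * (illustrative path; writing "0" clears the filter):
 *
 *	echo 'common_pid == 1 && prev_prio < 100' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/filter
 *	echo 0 > /sys/kernel/debug/tracing/events/sched/sched_switch/filter
 */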
static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		put_system(dir);

	return ret;
}
static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		kfree(dir);

	filp->private_data = dir;

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};
static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	return ftrace_event_open(inode, file, seq_ops);
}
static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;
	system->name = name;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree(system);
	return NULL;
}
static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",
			   name);
	return NULL;
}
static int
event_create_dir(struct dentry *parent,
		 struct ftrace_event_file *file,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	file->dir = debugfs_create_dir(call->name, d_events);
	if (!file->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, call,
			  filter);

	trace_create_file("format", 0444, file->dir, call,
			  format);

	return 0;
}
static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {

		if (file->event_call != call)
			continue;

		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);

		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}
static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}
static int event_init(struct ftrace_event_call *call)
{
	int ret = 0;

	if (WARN_ON(!call->name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n",
				call->name);
	}

	return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}
static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
		       struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	list_add(&file->list, &tr->events);

	return file;
}
/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call,
		      struct trace_array *tr,
		      const struct file_operations *id,
		      const struct file_operations *enable,
		      const struct file_operations *filter,
		      const struct file_operations *format)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct ftrace_event_call *call,
			    struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return 0;
}
struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call,
				   struct ftrace_module_file_ops *file_ops);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call, NULL);

	mutex_unlock(&event_mutex);
	return ret;
}
/*
 * Must be called under locking both of event_mutex and trace_event_sem.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	destroy_preds(call);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_sem);
	__trace_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
	/*
	 * As event_calls are added in groups by module,
	 * when we find one file_ops, we don't need to search for
	 * each call in that module, as the rest should be the
	 * same. Only search for a new one if the last one did
	 * not match.
	 */
	if (file_ops && mod == file_ops->mod)
		return file_ops;

	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			return file_ops;
	}
	return NULL;
}
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call, file_ops);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}

static int
__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
{
	return __trace_add_new_event(call, tr,
				     &file_ops->id, &file_ops->enable,
				     &file_ops->filter, &file_ops->format);
}
#else
static inline struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
	return NULL;
}
static inline int trace_module_notify(struct notifier_block *self,
				      unsigned long val, void *data)
{
	return 0;
}
static inline int
__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
{
	return -ENODEV;
}
#endif /* CONFIG_MODULES */
/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		if (call->mod) {
			/*
			 * Directories for events by modules need to
			 * keep module ref counts when opened (as we don't
			 * want the module to disappear when reading one
			 * of these files). The file_ops keep account of
			 * the module ref count.
			 */
			file_ops = find_ftrace_file_ops(file_ops, call->mod);
			if (!file_ops)
				continue; /* Warn? */
			ret = __trace_add_new_mod_event(call, tr, file_ops);
			if (ret < 0)
				pr_warning("Could not create directory for event %s\n",
					   call->name);
			continue;
		}
		ret = __trace_add_new_event(call, tr,
					    &ftrace_event_id_fops,
					    &ftrace_enable_fops,
					    &ftrace_event_filter_fops,
					    &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   call->name);
	}
}
#ifdef CONFIG_DYNAMIC_FTRACE

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

struct event_probe_data {
	struct ftrace_event_file	*file;
	unsigned long			count;
	int				ref;
	bool				enable;
};

static struct ftrace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (strcmp(event, call->name) == 0 &&
		    strcmp(system, call->class->system) == 0)
			return file;
	}
	return NULL;
}
static void
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (data->enable)
		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
}

static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_probe(ip, parent_ip, _data);
}
static int
event_enable_print(struct seq_file *m, unsigned long ip,
		   struct ftrace_probe_ops *ops, void *_data)
{
	struct event_probe_data *data = _data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "%s:%s:%s",
		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   data->file->event_call->class->system,
		   data->file->event_call->name);

	if (data->count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", data->count);

	return 0;
}
static int
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	data->ref++;
	return 0;
}

static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(data->file, 0, 1);
		module_put(data->file->event_call->mod);
		kfree(data);
	}
	*pdata = NULL;
}
static struct ftrace_probe_ops event_enable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};
static int
event_enable_func(struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled)
		return -EINVAL;

	if (!param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		ret = 0;
		goto out;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	if (!ret)
		goto out_free;

	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = register_ftrace_function_probe(glob, ops, data);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
 out_free:
	kfree(data);
	goto out;
}
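/*
 * This backs the set_ftrace_filter command form
 *
 *	<function>:enable_event:<system>:<event>[:count]
 *
 * e.g. (illustrative) enable sched_switch once, the first time
 * schedule() is hit:
 *
 *	echo 'schedule:enable_event:sched:sched_switch:1' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 */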
static struct ftrace_func_command event_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.func			= event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.func			= event_enable_func,
};

static __init int register_event_cmds(void)
{
	int ret;

	ret = register_ftrace_command(&event_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_ftrace_command(&event_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_ftrace_command(&event_enable_cmd);
	return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * The top level array has already had its ftrace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the debugfs has been
 * initialized, and we now have to create the files associated
 * to the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file,
				       &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   file->event_call->name);
	}
}
/*
 * For early boot up, the top trace array needs to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warning("Could not create early event %s\n",
				   call->name);
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list) {
		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);
	}
}
static void
__add_event_to_tracers(struct ftrace_event_call *call,
		       struct ftrace_module_file_ops *file_ops)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (file_ops)
			__trace_add_new_mod_event(call, tr, file_ops);
		else
			__trace_add_new_event(call, tr,
					      &ftrace_event_id_fops,
					      &ftrace_enable_fops,
					      &ftrace_event_filter_fops,
					      &ftrace_event_format_fops);
	}
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);
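/*
 * Boot-time usage (illustrative events), same syntax as "set_event",
 * comma separated:
 *
 *	trace_event=sched:sched_switch,irq:*
 */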
/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = debugfs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warning("Could not create debugfs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = debugfs_create_dir("events", parent);
	if (!d_events) {
		pr_warning("Could not create debugfs 'events' directory\n");
		return -ENOMEM;
	}

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;

	return 0;
}
/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_sem);
	__trace_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}
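/*
 * From user space this path is reached by creating a tracing instance
 * (illustrative path/name):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 */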
/*
 * The top trace array already had its file descriptors created.
 * Now the files themselves need to be created.
 */
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_sem);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}
2328 int event_trace_del_tracer(struct trace_array *tr)
2330 /* Disable any running events */
2331 __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2333 mutex_lock(&event_mutex);
2335 down_write(&trace_event_sem);
2336 __trace_remove_event_dirs(tr);
2337 debugfs_remove_recursive(tr->event_dir);
2338 up_write(&trace_event_sem);
2340 tr->event_dir = NULL;
2342 mutex_unlock(&event_mutex);
2344 return 0;
static __init int event_trace_memsetup(void)
{
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
	return 0;
}
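/*
 * KMEM_CACHE() creates a slab cache sized for the given struct, and
 * SLAB_PANIC means a failure to create the cache panics the kernel,
 * which is why the return values need no error checking here.
 */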
static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_call **iter, *call;
	char *buf = bootup_event_buf;
	char *token;
	int ret;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(tr, token, 1);
		if (ret)
			pr_warn("Failed to enable trace event: %s\n", token);
	}

	trace_printk_start_comm();

	register_event_cmds();

	return 0;
}
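/*
 * The parsing loop above follows standard strsep() semantics: a NULL
 * return ends the walk, and empty tokens (from ",," or a trailing
 * comma) are skipped. A minimal userspace sketch of the same loop,
 * for illustration only (not part of the kernel build; the sample
 * event list is hypothetical):
 */
#if 0
#define _DEFAULT_SOURCE		/* strsep() is a BSD/GNU extension */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "sched:sched_switch,,irq:*";	/* hypothetical value */
	char *str = buf;
	char *token;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;	/* skip empty tokens, as above */
		printf("would enable: %s\n", token);
	}
	return 0;
}
#endif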
static __init int event_trace_init(void)
{
	struct trace_array *tr;
	struct dentry *d_tracer;
	struct dentry *entry;
	int ret;

	tr = top_trace_array();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    tr, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields\n");

	ret = early_event_add_tracer(d_tracer, tr);
	if (ret)
		return ret;

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
early_initcall(event_trace_memsetup);
core_initcall(event_trace_enable);
fs_initcall(event_trace_init);
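/*
 * Initcall ordering matters here: early_initcall() runs before
 * core_initcall(), which runs before fs_initcall(). So the slab caches
 * exist before the events are registered, and the events are registered
 * before the debugfs files are created. The self tests below run from
 * late_initcall(), after all of the above.
 */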
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
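/*
 * test_work() deliberately takes spinlocks, disables interrupts,
 * delays and sleeps on a mutex, so that locking, interrupt and
 * scheduler related tracepoints (where configured) have something
 * to fire on while a test runs.
 */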
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		/* re-arm the state; schedule() returns us TASK_RUNNING */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
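/*
 * The kmalloc()/kfree() pair above exists purely to exercise the kmem
 * tracepoints (kmem:kmalloc, kmem:kfree) during a test run; the
 * allocation itself is never used for anything.
 */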
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_subsystem_dir *dir;
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

		/*
		 * Testing syscall events here is pretty useless and time
		 * consuming, but we still do it if configured. What we
		 * really need is a user thread to perform the syscalls
		 * as we test.
		 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error disabling system %s\n",
				   system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
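/*
 * With CONFIG_FTRACE_STARTUP_TEST enabled, the boot log produced by the
 * loops above looks roughly like this (event names are illustrative):
 *
 *	Running tests on trace events:
 *	Testing event sched_switch: OK
 *	...
 *	Running tests on trace event systems:
 *	Testing event system irq: OK
 *	...
 *	Testing all events: OK
 */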
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}
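/*
 * The per-cpu ftrace_test_event_disable counter above is a recursion
 * guard: this callback runs for (nearly) every kernel function on the
 * CPU, including the functions used to write the trace entry itself.
 * Only the outermost invocation (atomic_inc_return() == 1) records
 * anything; nested calls bail out through the "out" label.
 */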
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif /* CONFIG_FUNCTION_TRACER */

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */