kernel/trace/trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

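/*
 * Illustrative sketch (not part of the original file): a define_fields
 * callback for a hypothetical event with a single "unsigned long ip"
 * field would register it roughly like this:
 *
 *	static int my_event_define_fields(struct ftrace_event_call *call)
 *	{
 *		return trace_define_field(call, "unsigned long", "ip",
 *					  offsetof(struct ftrace_entry, ip),
 *					  sizeof(unsigned long), 0,
 *					  FILTER_OTHER);
 *	}
 */
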
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

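/*
 * For reference, __common_field(int, pid) expands (modulo whitespace) to:
 *
 *	ret = trace_define_field(call, "int", "common_pid",
 *				 offsetof(typeof(ent), pid),
 *				 sizeof(ent.pid),
 *				 is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */
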
static int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(call->event);
	if (!id)
		return -ENODEV;
	call->id = id;
	INIT_LIST_HEAD(&call->fields);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call);
		}
		break;
	case 1:
		if (!call->enabled) {
			tracing_start_cmdline_record();
			ret = call->regfunc(call);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->enabled = 1;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

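/*
 * Example inputs accepted above, per the format comment:
 *
 *	"sched:sched_switch"	one event in the sched subsystem
 *	"sched:" or "sched:*"	every event in the sched subsystem
 *	":sched_switch"		any event named sched_switch
 *	"sched_switch"		matched against event and subsystem names
 */
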
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}

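/*
 * Example (hypothetical caller, not from the original file): another
 * part of the kernel could turn a single event on with:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *
 * and turn it back off by passing 0 as the last argument.
 */
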
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

out_put:
	trace_parser_put(&parser);

	return ret;
}

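/*
 * From user space (assuming debugfs is mounted at /sys/kernel/debug),
 * this handler backs writes such as:
 *
 *	echo 'sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * where the leading '!' clears the event instead of setting it.
 */
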
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

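/*
 * Example (assuming the usual debugfs mount point): the per-event
 * "enable" file accepts only '0' and '1', e.g.:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 */
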
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all the events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

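/*
 * How the set_to_char mapping above works: bit 0 of 'set' is raised when
 * a disabled event is seen, bit 1 when an enabled one is seen, so:
 *
 *	set == 0 -> '?' (no matching events)
 *	set == 1 -> '0' (all disabled)
 *	set == 2 -> '1' (all enabled)
 *	set == 3 -> 'X' (a mixture)
 */
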
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
	sizeof(field.name), is_signed_type(type)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
		"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
		"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
		"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
		"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
		"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
		"\n",
		FIELD(unsigned short, type),
		FIELD(unsigned char, flags),
		FIELD(unsigned char, preempt_count),
		FIELD(int, pid),
		FIELD(int, lock_depth));
}

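/*
 * Illustrative output (exact offsets depend on the layout of
 * struct trace_entry for the running kernel); with the common fields
 * above this prints something like:
 *
 *	field:unsigned short type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char flags;	offset:2;	size:1;	signed:0;
 *	field:unsigned char preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int pid;	offset:4;	size:4;	signed:1;
 *	field:int lock_depth;	offset:8;	size:4;	signed:1;
 */
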
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

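/*
 * Example (assuming the usual debugfs layout): a filter is installed by
 * writing a boolean expression over the event's fields, e.g.:
 *
 *	echo 'common_pid == 1' > events/sched/sched_switch/filter
 *
 * apply_event_filter() parses and attaches that string; writing "0"
 * should clear an existing filter.
 */
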
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

	if (call->id && call->profile_enable)
		trace_create_file("id", 0444, call->dir, call,
				  id);

	if (call->define_fields) {
		ret = trace_define_common_fields(call);
		if (!ret)
			ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		trace_create_file("filter", 0644, call->dir, call,
				  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

static int __trace_add_event_call(struct ftrace_event_call *call)
{
	struct dentry *d_events;
	int ret;

	if (!call->name)
		return -EINVAL;

	if (call->raw_init) {
		ret = call->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace "
					   "events/%s\n", call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				&ftrace_enable_fops, &ftrace_event_filter_fops,
				&ftrace_event_format_fops);
	if (!ret)
		list_add(&call->list, &ftrace_events);

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call);
	mutex_unlock(&event_mutex);
	return ret;
}

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event)
		__unregister_ftrace_event(call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n",
						   call->name);
				continue;
			}
		}

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		ret = event_create_dir(call, d_events,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);

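/*
 * Example (hypothetical command line): events can be enabled at boot,
 * comma separated, before the debugfs files exist, e.g.:
 *
 *	trace_event=sched:sched_switch,irq:
 *
 * The buffer is parsed with strsep(..., ",") in event_trace_init() below,
 * one token per ftrace_set_clr_event() call.
 */
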
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n",
						   call->name);
				continue;
			}
		}
		ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->system &&
		    strcmp(call->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}

#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif