kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
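
/*
 * Illustrative sketch only (not part of this file): a hypothetical event
 * class could describe one record member from its ->define_fields()
 * callback like so. "struct my_entry", "my_val" and "my_define_fields"
 * are made-up names for the example.
 *
 *	struct my_entry {
 *		struct trace_entry	ent;
 *		int			my_val;
 *	};
 *
 *	static int my_define_fields(struct ftrace_event_call *call)
 *	{
 *		return trace_define_field(call, "int", "my_val",
 *					  offsetof(struct my_entry, my_val),
 *					  sizeof(int), 1, FILTER_OTHER);
 *	}
 */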
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
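
/*
 * For illustration, __common_field(int, pid) below expands (roughly) to:
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int",
 *				   "common_pid",
 *				   offsetof(typeof(ent), pid),
 *				   sizeof(ent.pid),
 *				   is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */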
static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, padding);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_call *call,
				       int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void put_system(struct event_subsystem *system)
{
	mutex_lock(&event_mutex);
	__put_system(system);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
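
/*
 * Concrete examples of the accepted formats (the event and subsystem
 * names are only illustrative):
 *
 *	echo 'sched:sched_switch' > set_event	- one event
 *	echo 'sched:' > set_event		- a whole subsystem
 *	echo ':sched_switch' > set_event	- that name in any subsystem
 *
 * A leading '!' (handled in ftrace_event_write() below) disables the
 * matched events instead of enabling them.
 */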
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
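
/*
 * Usage sketch, assuming a caller elsewhere in the kernel (the function
 * name is hypothetical): enable every event in the "sched" subsystem.
 *
 *	static int __init my_debug_init(void)
 *	{
 *		return trace_set_clr_event("sched", NULL, 1);
 *	}
 */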
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
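
/*
 * From user space, the read/write pair above behaves like the following
 * (the event path is only an example):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 */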
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct event_subsystem *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
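
/*
 * The set_to_char mapping follows directly from the bitmask built above:
 * bit 0 is set when at least one matching event is disabled, bit 1 when
 * at least one is enabled, so:
 *
 *	set == 0 -> '?'  (no matching events at all)
 *	set == 1 -> '0'  (all events in the system disabled)
 *	set == 2 -> '1'  (all events enabled)
 *	set == 3 -> 'X'  (a mixture)
 */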
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
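
/*
 * Putting f_start/f_next/f_show together, reading a "format" file yields
 * output shaped roughly like this (names, offsets and sizes are only
 * illustrative; the first block is the common fields, the second the
 * event's own fields):
 *
 *	name: <event name>
 *	ID: <event id>
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *		field:int my_val;	offset:8;	size:4;	signed:1;
 *
 *	print fmt: <call->print_fmt>
 */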
static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
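
/*
 * Example of the user-visible side of the filter files (the predicate
 * itself is parsed by apply_event_filter(); the expression and event
 * path below are only illustrative):
 *
 *	echo 'common_pid == 42' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter	(clears the filter)
 */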
static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	int ret;

	if (!inode->i_private)
		goto skip_search;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(system, &event_subsystems, list) {
		if (system == inode->i_private) {
			/* Don't open systems with no events */
			if (!system->nr_events) {
				system = NULL;
				break;
			}
			__get_system(system);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	if (system != inode->i_private)
		return -ENODEV;

 skip_search:
	ret = tracing_open_generic(inode, filp);
	if (ret < 0 && system)
		put_system(system);

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct event_subsystem *system = inode->i_private;

	if (system)
		put_system(system);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->ref_count = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry, system,
			  &ftrace_system_enable_fops);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}
static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace events/%s\n",
					   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);
	return ret;
}

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				__put_system(system);
			}
			break;
		}
	}
}

/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
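
/*
 * For illustration: the macro walks an array of event-call pointers,
 * pointer by pointer, e.g. (do_something() is a stand-in, not a real
 * function):
 *
 *	struct ftrace_event_call **call;
 *
 *	for_each_event(call, mod->trace_events,
 *		       mod->trace_events + mod->num_trace_events)
 *		do_something(*call);
 */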
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(*call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
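
/*
 * Boot-time usage sketch: the saved string is split on ',' in
 * event_trace_init() below and each token is fed to
 * ftrace_set_clr_event(), so a kernel command line such as the
 * following (event names illustrative) enables those events before
 * user space comes up:
 *
 *	trace_event=sched:sched_switch,irq:
 */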
static __init int event_trace_init(void)
{
	struct ftrace_event_call **call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata = {
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;
	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif /* CONFIG_FUNCTION_TRACER */

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */