ftrace: add quick function trace stop
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the last
 * invocation of the old trace function on other CPUs.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

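/*
 * Illustrative sketch (not part of the original file): with the
 * function_trace_stop flag above, a tracer can pause and resume all
 * function tracing with a single store, no code patching or locking.
 * The helper names below are hypothetical.
 */
#if 0
static void my_tracer_pause(void)
{
	function_trace_stop = 1;	/* call sites start bailing out */
}

static void my_tracer_resume(void)
{
	function_trace_stop = 0;	/* tracing resumes */
}
#endif
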
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
#else
		if (ops->next == &ftrace_list_end)
			__ftrace_trace_function = ops->func;
		else
			__ftrace_trace_function = ftrace_list_func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

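/*
 * Worked example (illustrative assumptions, not from the original file):
 * on a 64-bit arch with 4 KB pages, if sizeof(struct ftrace_page) is 16
 * bytes and sizeof(struct dyn_ftrace) is 32 bytes, then
 * ENTRIES_PER_PAGE = (4096 - 16) / 32 = 127 records per page.
 */
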
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)		({ 0; })
# define unfreeze_record(rec)		({ 0; })
# define record_frozen(rec)		({ 0; })
#endif /* CONFIG_KPROBES */

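/*
 * Freed records are kept on a simple singly linked free list: while a
 * record is free, its ->ip field is reused as the "next" pointer and
 * the FTRACE_FL_FREE flag marks it as such, as ftrace_free_rec() below
 * and the matching check in ftrace_alloc_dyn_node() show.
 */
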
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (!ftrace_enabled || ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

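/*
 * The command is executed via stop_machine(): every other CPU is held
 * in a known state while __ftrace_modify_code() runs, so no CPU can be
 * executing an mcount call site while its instruction is rewritten.
 */
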
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	if (ftrace_start_up == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(void)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

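/*
 * Pattern forms accepted by the parser above (the example patterns are
 * illustrative, not from the original file):
 *
 *	"sched_clock"	MATCH_FULL:        exact symbol name
 *	"sched_*"	MATCH_FRONT_ONLY:  leading characters match
 *	"*_lock"	MATCH_END_ONLY:    trailing characters match
 *	"*spin*"	MATCH_MIDDLE_ONLY: substring match
 */
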
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

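/*
 * Usage sketch (illustrative, not part of the original file). Note that
 * ftrace_match() writes a NUL into the buffer while parsing wildcards,
 * so the string passed in must be writable, never a string literal.
 */
#if 0
	unsigned char buf[] = "sched_*";

	/* trace only functions matching "sched_*", dropping old filters */
	ftrace_set_filter(buf, sizeof(buf) - 1, 1);
#endif
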
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (iter->filtered && ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);

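/*
 * Shell usage sketch for the files created above (assumes debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	# list every traceable function
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *
 *	# trace only scheduler functions
 *	echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 *	# never trace anything matching *spin*
 *	echo '*spin*' > /sys/kernel/debug/tracing/set_ftrace_notrace
 */
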
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code();
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is disabled for good and the
 * trace function is cleared without taking any locks.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

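/*
 * Registration sketch (illustrative, not part of the original file):
 * a minimal ftrace_ops whose callback runs for every traced function.
 * Per the note above, the callback is marked notrace so it cannot
 * recurse into itself.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip its caller */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}
#endif
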
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}