kernel/trace/ftrace.c
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/list.h>
30 #include <asm/ftrace.h>
32 #include "trace.h"
34 #define FTRACE_WARN_ON(cond) \
35 do { \
36 if (WARN_ON(cond)) \
37 ftrace_kill(); \
38 } while (0)
40 #define FTRACE_WARN_ON_ONCE(cond) \
41 do { \
42 if (WARN_ON_ONCE(cond)) \
43 ftrace_kill(); \
44 } while (0)
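/*
 * Any internal inconsistency caught by these wrappers permanently shuts
 * ftrace down via ftrace_kill(), on top of the usual WARN_ON() splat,
 * so a single bad code patch cannot keep corrupting the kernel text.
 */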
46 /* ftrace_enabled is used to turn ftrace on or off */
47 int ftrace_enabled __read_mostly;
48 static int last_ftrace_enabled;
50 /* Quick disabling of function tracer. */
51 int function_trace_stop;
54 * ftrace_disabled is set when an anomaly is discovered.
55 * ftrace_disabled is much stronger than ftrace_enabled.
57 static int ftrace_disabled __read_mostly;
59 static DEFINE_SPINLOCK(ftrace_lock);
60 static DEFINE_MUTEX(ftrace_sysctl_lock);
62 static struct ftrace_ops ftrace_list_end __read_mostly =
64 .func = ftrace_stub,
67 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
68 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
69 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
71 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
73 struct ftrace_ops *op = ftrace_list;
75 /* in case someone actually ports this to alpha! */
76 read_barrier_depends();
78 while (op != &ftrace_list_end) {
79 /* silly alpha */
80 read_barrier_depends();
81 op->func(ip, parent_ip);
82 op = op->next;
86 /**
87 * clear_ftrace_function - reset the ftrace function
89 * This NULLs the ftrace function and in essence stops
90 * tracing. There may be a short lag before every CPU stops calling it.
92 void clear_ftrace_function(void)
94 ftrace_trace_function = ftrace_stub;
95 __ftrace_trace_function = ftrace_stub;
98 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
100 * For those archs that do not test function_trace_stop in their
101 * mcount call site, we need to do it from C.
103 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
105 if (function_trace_stop)
106 return;
108 __ftrace_trace_function(ip, parent_ip);
110 #endif
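/*
 * Illustrative sketch: the quick-disable flag above lets an emergency path
 * silence all callbacks without patching any code.  The function name below
 * is hypothetical; it only shows how such a path might use the flag.
 */
#if 0
static void example_emergency_stop(void)
{
	/* every later mcount entry (or ftrace_test_stop_func) bails out */
	function_trace_stop = 1;
}
#endif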
112 static int __register_ftrace_function(struct ftrace_ops *ops)
114 /* should not be called from interrupt context */
115 spin_lock(&ftrace_lock);
117 ops->next = ftrace_list;
119 * We are entering ops into the ftrace_list but another
120 * CPU might be walking that list. We need to make sure
121 * the ops->next pointer is valid before another CPU sees
122 * the ops pointer included into the ftrace_list.
124 smp_wmb();
125 ftrace_list = ops;
127 if (ftrace_enabled) {
129 * For one func, simply call it directly.
130 * For more than one func, call the chain.
132 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
133 if (ops->next == &ftrace_list_end)
134 ftrace_trace_function = ops->func;
135 else
136 ftrace_trace_function = ftrace_list_func;
137 #else
138 if (ops->next == &ftrace_list_end)
139 __ftrace_trace_function = ops->func;
140 else
141 __ftrace_trace_function = ftrace_list_func;
142 ftrace_trace_function = ftrace_test_stop_func;
143 #endif
146 spin_unlock(&ftrace_lock);
148 return 0;
151 static int __unregister_ftrace_function(struct ftrace_ops *ops)
153 struct ftrace_ops **p;
154 int ret = 0;
156 /* should not be called from interrupt context */
157 spin_lock(&ftrace_lock);
160 * If we are removing the last function, then simply point
161 * to the ftrace_stub.
163 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
164 ftrace_trace_function = ftrace_stub;
165 ftrace_list = &ftrace_list_end;
166 goto out;
169 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
170 if (*p == ops)
171 break;
173 if (*p != ops) {
174 ret = -1;
175 goto out;
178 *p = (*p)->next;
180 if (ftrace_enabled) {
181 /* If we only have one func left, then call that directly */
182 if (ftrace_list->next == &ftrace_list_end)
183 ftrace_trace_function = ftrace_list->func;
186 out:
187 spin_unlock(&ftrace_lock);
189 return ret;
192 #ifdef CONFIG_DYNAMIC_FTRACE
193 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
194 # error Dynamic ftrace depends on MCOUNT_RECORD
195 #endif
198 * Since MCOUNT_ADDR may point to mcount itself, we do not want
199 * to get it confused by reading a reference in the code as we
200 * are parsing the objcopy output of text. Use a variable for
201 * it instead.
203 static unsigned long mcount_addr = MCOUNT_ADDR;
205 enum {
206 FTRACE_ENABLE_CALLS = (1 << 0),
207 FTRACE_DISABLE_CALLS = (1 << 1),
208 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
209 FTRACE_ENABLE_MCOUNT = (1 << 3),
210 FTRACE_DISABLE_MCOUNT = (1 << 4),
213 static int ftrace_filtered;
215 static LIST_HEAD(ftrace_new_addrs);
217 static DEFINE_MUTEX(ftrace_regex_lock);
219 struct ftrace_page {
220 struct ftrace_page *next;
221 unsigned long index;
222 struct dyn_ftrace records[];
225 #define ENTRIES_PER_PAGE \
226 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
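/*
 * Records are handed out from whole pages: each page begins with a
 * struct ftrace_page header (next pointer and fill index) and the rest of
 * the page is packed with dyn_ftrace records through the flexible array
 * member above, hence the ENTRIES_PER_PAGE arithmetic.
 */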
228 /* estimate from running different kernels */
229 #define NR_TO_INIT 10000
231 static struct ftrace_page *ftrace_pages_start;
232 static struct ftrace_page *ftrace_pages;
234 static struct dyn_ftrace *ftrace_free_records;
237 #ifdef CONFIG_KPROBES
239 static int frozen_record_count;
241 static inline void freeze_record(struct dyn_ftrace *rec)
243 if (!(rec->flags & FTRACE_FL_FROZEN)) {
244 rec->flags |= FTRACE_FL_FROZEN;
245 frozen_record_count++;
249 static inline void unfreeze_record(struct dyn_ftrace *rec)
251 if (rec->flags & FTRACE_FL_FROZEN) {
252 rec->flags &= ~FTRACE_FL_FROZEN;
253 frozen_record_count--;
257 static inline int record_frozen(struct dyn_ftrace *rec)
259 return rec->flags & FTRACE_FL_FROZEN;
261 #else
262 # define freeze_record(rec) ({ 0; })
263 # define unfreeze_record(rec) ({ 0; })
264 # define record_frozen(rec) ({ 0; })
265 #endif /* CONFIG_KPROBES */
267 static void ftrace_free_rec(struct dyn_ftrace *rec)
269 rec->ip = (unsigned long)ftrace_free_records;
270 ftrace_free_records = rec;
271 rec->flags |= FTRACE_FL_FREE;
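/*
 * Freed records are kept on a singly linked free list: a freed record no
 * longer needs its instruction pointer, so rec->ip is reused as the "next"
 * link and FTRACE_FL_FREE marks the entry.  ftrace_alloc_dyn_node() pops
 * from this list before carving a new record out of ftrace_pages.
 */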
274 void ftrace_release(void *start, unsigned long size)
276 struct dyn_ftrace *rec;
277 struct ftrace_page *pg;
278 unsigned long s = (unsigned long)start;
279 unsigned long e = s + size;
280 int i;
282 if (ftrace_disabled || !start)
283 return;
285 /* should not be called from interrupt context */
286 spin_lock(&ftrace_lock);
288 for (pg = ftrace_pages_start; pg; pg = pg->next) {
289 for (i = 0; i < pg->index; i++) {
290 rec = &pg->records[i];
292 if ((rec->ip >= s) && (rec->ip < e))
293 ftrace_free_rec(rec);
296 spin_unlock(&ftrace_lock);
299 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
301 struct dyn_ftrace *rec;
303 /* First check for freed records */
304 if (ftrace_free_records) {
305 rec = ftrace_free_records;
307 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
308 FTRACE_WARN_ON_ONCE(1);
309 ftrace_free_records = NULL;
310 return NULL;
313 ftrace_free_records = (void *)rec->ip;
314 memset(rec, 0, sizeof(*rec));
315 return rec;
318 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
319 if (!ftrace_pages->next) {
320 /* allocate another page */
321 ftrace_pages->next =
322 (void *)get_zeroed_page(GFP_KERNEL);
323 if (!ftrace_pages->next)
324 return NULL;
326 ftrace_pages = ftrace_pages->next;
329 return &ftrace_pages->records[ftrace_pages->index++];
332 static struct dyn_ftrace *
333 ftrace_record_ip(unsigned long ip)
335 struct dyn_ftrace *rec;
337 if (!ftrace_enabled || ftrace_disabled)
338 return NULL;
340 rec = ftrace_alloc_dyn_node(ip);
341 if (!rec)
342 return NULL;
344 rec->ip = ip;
346 list_add(&rec->list, &ftrace_new_addrs);
348 return rec;
351 #define FTRACE_ADDR ((long)(ftrace_caller))
353 static int
354 __ftrace_replace_code(struct dyn_ftrace *rec,
355 unsigned char *old, unsigned char *new, int enable)
357 unsigned long ip, fl;
359 ip = rec->ip;
361 if (ftrace_filtered && enable) {
363 * If filtering is on:
365 * If this record is set to be filtered and
366 * is enabled then do nothing.
368 * If this record is set to be filtered and
369 * it is not enabled, enable it.
371 * If this record is not set to be filtered
372 * and it is not enabled do nothing.
374 * If this record is set not to trace then
375 * do nothing.
377 * If this record is set not to trace and
378 * it is enabled then disable it.
380 * If this record is not set to be filtered and
381 * it is enabled, disable it.
384 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
385 FTRACE_FL_ENABLED);
387 if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
388 (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
389 !fl || (fl == FTRACE_FL_NOTRACE))
390 return 0;
393 * If it is enabled disable it,
394 * otherwise enable it!
396 if (fl & FTRACE_FL_ENABLED) {
397 /* swap new and old */
398 new = old;
399 old = ftrace_call_replace(ip, FTRACE_ADDR);
400 rec->flags &= ~FTRACE_FL_ENABLED;
401 } else {
402 new = ftrace_call_replace(ip, FTRACE_ADDR);
403 rec->flags |= FTRACE_FL_ENABLED;
405 } else {
407 if (enable) {
409 * If this record is set not to trace and is
410 * not enabled, do nothing.
412 fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
413 if (fl == FTRACE_FL_NOTRACE)
414 return 0;
416 new = ftrace_call_replace(ip, FTRACE_ADDR);
417 } else
418 old = ftrace_call_replace(ip, FTRACE_ADDR);
420 if (enable) {
421 if (rec->flags & FTRACE_FL_ENABLED)
422 return 0;
423 rec->flags |= FTRACE_FL_ENABLED;
424 } else {
425 if (!(rec->flags & FTRACE_FL_ENABLED))
426 return 0;
427 rec->flags &= ~FTRACE_FL_ENABLED;
431 return ftrace_modify_code(ip, old, new);
434 static void ftrace_replace_code(int enable)
436 int i, failed;
437 unsigned char *new = NULL, *old = NULL;
438 struct dyn_ftrace *rec;
439 struct ftrace_page *pg;
441 if (enable)
442 old = ftrace_nop_replace();
443 else
444 new = ftrace_nop_replace();
446 for (pg = ftrace_pages_start; pg; pg = pg->next) {
447 for (i = 0; i < pg->index; i++) {
448 rec = &pg->records[i];
450 /* don't modify code that has already faulted */
451 if (rec->flags & FTRACE_FL_FAILED)
452 continue;
454 /* ignore updates to this record's mcount site */
455 if (get_kprobe((void *)rec->ip)) {
456 freeze_record(rec);
457 continue;
458 } else {
459 unfreeze_record(rec);
462 failed = __ftrace_replace_code(rec, old, new, enable);
463 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
464 rec->flags |= FTRACE_FL_FAILED;
465 if ((system_state == SYSTEM_BOOTING) ||
466 !core_kernel_text(rec->ip)) {
467 ftrace_free_rec(rec);
474 static void print_ip_ins(const char *fmt, unsigned char *p)
476 int i;
478 printk(KERN_CONT "%s", fmt);
480 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
481 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
484 static int
485 ftrace_code_disable(struct dyn_ftrace *rec)
487 unsigned long ip;
488 unsigned char *nop, *call;
489 int ret;
491 ip = rec->ip;
493 nop = ftrace_nop_replace();
494 call = ftrace_call_replace(ip, mcount_addr);
496 ret = ftrace_modify_code(ip, call, nop);
497 if (ret) {
498 switch (ret) {
499 case -EFAULT:
500 FTRACE_WARN_ON_ONCE(1);
501 pr_info("ftrace faulted on modifying ");
502 print_ip_sym(ip);
503 break;
504 case -EINVAL:
505 FTRACE_WARN_ON_ONCE(1);
506 pr_info("ftrace failed to modify ");
507 print_ip_sym(ip);
508 print_ip_ins(" expected: ", call);
509 print_ip_ins(" actual: ", (unsigned char *)ip);
510 print_ip_ins(" replace: ", nop);
511 printk(KERN_CONT "\n");
512 break;
513 case -EPERM:
514 FTRACE_WARN_ON_ONCE(1);
515 pr_info("ftrace faulted on writing ");
516 print_ip_sym(ip);
517 break;
518 default:
519 FTRACE_WARN_ON_ONCE(1);
520 pr_info("ftrace faulted on unknown error ");
521 print_ip_sym(ip);
524 rec->flags |= FTRACE_FL_FAILED;
525 return 0;
527 return 1;
530 static int __ftrace_modify_code(void *data)
532 int *command = data;
534 if (*command & FTRACE_ENABLE_CALLS)
535 ftrace_replace_code(1);
536 else if (*command & FTRACE_DISABLE_CALLS)
537 ftrace_replace_code(0);
539 if (*command & FTRACE_UPDATE_TRACE_FUNC)
540 ftrace_update_ftrace_func(ftrace_trace_function);
542 return 0;
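/*
 * stop_machine() runs __ftrace_modify_code() while every other CPU spins
 * with interrupts disabled, so no CPU can be executing an mcount call site
 * while its instruction bytes are being rewritten.
 */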
545 static void ftrace_run_update_code(int command)
547 stop_machine(__ftrace_modify_code, &command, NULL);
550 static ftrace_func_t saved_ftrace_func;
551 static int ftrace_start_up;
552 static DEFINE_MUTEX(ftrace_start_lock);
554 static void ftrace_startup(void)
556 int command = 0;
558 if (unlikely(ftrace_disabled))
559 return;
561 mutex_lock(&ftrace_start_lock);
562 ftrace_start_up++;
563 if (ftrace_start_up == 1)
564 command |= FTRACE_ENABLE_CALLS;
566 if (saved_ftrace_func != ftrace_trace_function) {
567 saved_ftrace_func = ftrace_trace_function;
568 command |= FTRACE_UPDATE_TRACE_FUNC;
571 if (!command || !ftrace_enabled)
572 goto out;
574 ftrace_run_update_code(command);
575 out:
576 mutex_unlock(&ftrace_start_lock);
579 static void ftrace_shutdown(void)
581 int command = 0;
583 if (unlikely(ftrace_disabled))
584 return;
586 mutex_lock(&ftrace_start_lock);
587 ftrace_start_up--;
588 if (!ftrace_start_up)
589 command |= FTRACE_DISABLE_CALLS;
591 if (saved_ftrace_func != ftrace_trace_function) {
592 saved_ftrace_func = ftrace_trace_function;
593 command |= FTRACE_UPDATE_TRACE_FUNC;
596 if (!command || !ftrace_enabled)
597 goto out;
599 ftrace_run_update_code(command);
600 out:
601 mutex_unlock(&ftrace_start_lock);
604 static void ftrace_startup_sysctl(void)
606 int command = FTRACE_ENABLE_MCOUNT;
608 if (unlikely(ftrace_disabled))
609 return;
611 mutex_lock(&ftrace_start_lock);
612 /* Force update next time */
613 saved_ftrace_func = NULL;
614 /* ftrace_start_up is true if we want ftrace running */
615 if (ftrace_start_up)
616 command |= FTRACE_ENABLE_CALLS;
618 ftrace_run_update_code(command);
619 mutex_unlock(&ftrace_start_lock);
622 static void ftrace_shutdown_sysctl(void)
624 int command = FTRACE_DISABLE_MCOUNT;
626 if (unlikely(ftrace_disabled))
627 return;
629 mutex_lock(&ftrace_start_lock);
630 /* ftrace_start_up is true if ftrace is running */
631 if (ftrace_start_up)
632 command |= FTRACE_DISABLE_CALLS;
634 ftrace_run_update_code(command);
635 mutex_unlock(&ftrace_start_lock);
638 static cycle_t ftrace_update_time;
639 static unsigned long ftrace_update_cnt;
640 unsigned long ftrace_update_tot_cnt;
642 static int ftrace_update_code(void)
644 struct dyn_ftrace *p, *t;
645 cycle_t start, stop;
647 start = ftrace_now(raw_smp_processor_id());
648 ftrace_update_cnt = 0;
650 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
652 /* If something went wrong, bail without enabling anything */
653 if (unlikely(ftrace_disabled))
654 return -1;
656 list_del_init(&p->list);
658 /* convert record (i.e., patch the mcount call with a NOP) */
659 if (ftrace_code_disable(p)) {
660 p->flags |= FTRACE_FL_CONVERTED;
661 ftrace_update_cnt++;
662 } else
663 ftrace_free_rec(p);
666 stop = ftrace_now(raw_smp_processor_id());
667 ftrace_update_time = stop - start;
668 ftrace_update_tot_cnt += ftrace_update_cnt;
670 return 0;
673 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
675 struct ftrace_page *pg;
676 int cnt;
677 int i;
679 /* allocate a few pages */
680 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
681 if (!ftrace_pages_start)
682 return -1;
685 * Allocate a few more pages.
687 * TODO: have some parser search vmlinux before
688 * final linking to find all calls to ftrace.
689 * Then we can:
690 * a) know how many pages to allocate.
691 * and/or
692 * b) set up the table then.
694 * The dynamic code is still necessary for
695 * modules.
698 pg = ftrace_pages = ftrace_pages_start;
700 cnt = num_to_init / ENTRIES_PER_PAGE;
701 pr_info("ftrace: allocating %ld entries in %d pages\n",
702 num_to_init, cnt);
704 for (i = 0; i < cnt; i++) {
705 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
707 /* If we fail, we'll try later anyway */
708 if (!pg->next)
709 break;
711 pg = pg->next;
714 return 0;
717 enum {
718 FTRACE_ITER_FILTER = (1 << 0),
719 FTRACE_ITER_CONT = (1 << 1),
720 FTRACE_ITER_NOTRACE = (1 << 2),
721 FTRACE_ITER_FAILURES = (1 << 3),
724 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
726 struct ftrace_iterator {
727 loff_t pos;
728 struct ftrace_page *pg;
729 unsigned idx;
730 unsigned flags;
731 unsigned char buffer[FTRACE_BUFF_MAX+1];
732 unsigned buffer_idx;
733 unsigned filtered;
736 static void *
737 t_next(struct seq_file *m, void *v, loff_t *pos)
739 struct ftrace_iterator *iter = m->private;
740 struct dyn_ftrace *rec = NULL;
742 (*pos)++;
744 /* should not be called from interrupt context */
745 spin_lock(&ftrace_lock);
746 retry:
747 if (iter->idx >= iter->pg->index) {
748 if (iter->pg->next) {
749 iter->pg = iter->pg->next;
750 iter->idx = 0;
751 goto retry;
753 } else {
754 rec = &iter->pg->records[iter->idx++];
755 if ((rec->flags & FTRACE_FL_FREE) ||
757 (!(iter->flags & FTRACE_ITER_FAILURES) &&
758 (rec->flags & FTRACE_FL_FAILED)) ||
760 ((iter->flags & FTRACE_ITER_FAILURES) &&
761 !(rec->flags & FTRACE_FL_FAILED)) ||
763 ((iter->flags & FTRACE_ITER_FILTER) &&
764 !(rec->flags & FTRACE_FL_FILTER)) ||
766 ((iter->flags & FTRACE_ITER_NOTRACE) &&
767 !(rec->flags & FTRACE_FL_NOTRACE))) {
768 rec = NULL;
769 goto retry;
772 spin_unlock(&ftrace_lock);
774 iter->pos = *pos;
776 return rec;
779 static void *t_start(struct seq_file *m, loff_t *pos)
781 struct ftrace_iterator *iter = m->private;
782 void *p = NULL;
783 loff_t l = -1;
785 if (*pos != iter->pos) {
786 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
788 } else {
789 l = *pos;
790 p = t_next(m, p, &l);
793 return p;
796 static void t_stop(struct seq_file *m, void *p)
800 static int t_show(struct seq_file *m, void *v)
802 struct dyn_ftrace *rec = v;
803 char str[KSYM_SYMBOL_LEN];
805 if (!rec)
806 return 0;
808 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
810 seq_printf(m, "%s\n", str);
812 return 0;
815 static struct seq_operations show_ftrace_seq_ops = {
816 .start = t_start,
817 .next = t_next,
818 .stop = t_stop,
819 .show = t_show,
822 static int
823 ftrace_avail_open(struct inode *inode, struct file *file)
825 struct ftrace_iterator *iter;
826 int ret;
828 if (unlikely(ftrace_disabled))
829 return -ENODEV;
831 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
832 if (!iter)
833 return -ENOMEM;
835 iter->pg = ftrace_pages_start;
836 iter->pos = -1;
838 ret = seq_open(file, &show_ftrace_seq_ops);
839 if (!ret) {
840 struct seq_file *m = file->private_data;
842 m->private = iter;
843 } else {
844 kfree(iter);
847 return ret;
850 int ftrace_avail_release(struct inode *inode, struct file *file)
852 struct seq_file *m = (struct seq_file *)file->private_data;
853 struct ftrace_iterator *iter = m->private;
855 seq_release(inode, file);
856 kfree(iter);
858 return 0;
861 static int
862 ftrace_failures_open(struct inode *inode, struct file *file)
864 int ret;
865 struct seq_file *m;
866 struct ftrace_iterator *iter;
868 ret = ftrace_avail_open(inode, file);
869 if (!ret) {
870 m = (struct seq_file *)file->private_data;
871 iter = (struct ftrace_iterator *)m->private;
872 iter->flags = FTRACE_ITER_FAILURES;
875 return ret;
879 static void ftrace_filter_reset(int enable)
881 struct ftrace_page *pg;
882 struct dyn_ftrace *rec;
883 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
884 unsigned i;
886 /* should not be called from interrupt context */
887 spin_lock(&ftrace_lock);
888 if (enable)
889 ftrace_filtered = 0;
890 pg = ftrace_pages_start;
891 while (pg) {
892 for (i = 0; i < pg->index; i++) {
893 rec = &pg->records[i];
894 if (rec->flags & FTRACE_FL_FAILED)
895 continue;
896 rec->flags &= ~type;
898 pg = pg->next;
900 spin_unlock(&ftrace_lock);
903 static int
904 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
906 struct ftrace_iterator *iter;
907 int ret = 0;
909 if (unlikely(ftrace_disabled))
910 return -ENODEV;
912 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
913 if (!iter)
914 return -ENOMEM;
916 mutex_lock(&ftrace_regex_lock);
917 if ((file->f_mode & FMODE_WRITE) &&
918 !(file->f_flags & O_APPEND))
919 ftrace_filter_reset(enable);
921 if (file->f_mode & FMODE_READ) {
922 iter->pg = ftrace_pages_start;
923 iter->pos = -1;
924 iter->flags = enable ? FTRACE_ITER_FILTER :
925 FTRACE_ITER_NOTRACE;
927 ret = seq_open(file, &show_ftrace_seq_ops);
928 if (!ret) {
929 struct seq_file *m = file->private_data;
930 m->private = iter;
931 } else
932 kfree(iter);
933 } else
934 file->private_data = iter;
935 mutex_unlock(&ftrace_regex_lock);
937 return ret;
940 static int
941 ftrace_filter_open(struct inode *inode, struct file *file)
943 return ftrace_regex_open(inode, file, 1);
946 static int
947 ftrace_notrace_open(struct inode *inode, struct file *file)
949 return ftrace_regex_open(inode, file, 0);
952 static ssize_t
953 ftrace_regex_read(struct file *file, char __user *ubuf,
954 size_t cnt, loff_t *ppos)
956 if (file->f_mode & FMODE_READ)
957 return seq_read(file, ubuf, cnt, ppos);
958 else
959 return -EPERM;
962 static loff_t
963 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
965 loff_t ret;
967 if (file->f_mode & FMODE_READ)
968 ret = seq_lseek(file, offset, origin);
969 else
970 file->f_pos = ret = 1;
972 return ret;
975 enum {
976 MATCH_FULL,
977 MATCH_FRONT_ONLY,
978 MATCH_MIDDLE_ONLY,
979 MATCH_END_ONLY,
982 static void
983 ftrace_match(unsigned char *buff, int len, int enable)
985 char str[KSYM_SYMBOL_LEN];
986 char *search = NULL;
987 struct ftrace_page *pg;
988 struct dyn_ftrace *rec;
989 int type = MATCH_FULL;
990 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
991 unsigned i, match = 0, search_len = 0;
993 for (i = 0; i < len; i++) {
994 if (buff[i] == '*') {
995 if (!i) {
996 search = buff + i + 1;
997 type = MATCH_END_ONLY;
998 search_len = len - (i + 1);
999 } else {
1000 if (type == MATCH_END_ONLY) {
1001 type = MATCH_MIDDLE_ONLY;
1002 } else {
1003 match = i;
1004 type = MATCH_FRONT_ONLY;
1006 buff[i] = 0;
1007 break;
1012 /* should not be called from interrupt context */
1013 spin_lock(&ftrace_lock);
1014 if (enable)
1015 ftrace_filtered = 1;
1016 pg = ftrace_pages_start;
1017 while (pg) {
1018 for (i = 0; i < pg->index; i++) {
1019 int matched = 0;
1020 char *ptr;
1022 rec = &pg->records[i];
1023 if (rec->flags & FTRACE_FL_FAILED)
1024 continue;
1025 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1026 switch (type) {
1027 case MATCH_FULL:
1028 if (strcmp(str, buff) == 0)
1029 matched = 1;
1030 break;
1031 case MATCH_FRONT_ONLY:
1032 if (memcmp(str, buff, match) == 0)
1033 matched = 1;
1034 break;
1035 case MATCH_MIDDLE_ONLY:
1036 if (strstr(str, search))
1037 matched = 1;
1038 break;
1039 case MATCH_END_ONLY:
1040 ptr = strstr(str, search);
1041 if (ptr && (ptr[search_len] == 0))
1042 matched = 1;
1043 break;
1045 if (matched)
1046 rec->flags |= flag;
1048 pg = pg->next;
1050 spin_unlock(&ftrace_lock);
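/*
 * Illustrative sketch: the parser above supports a single '*' wildcard.
 * A few examples of how a pattern is classified:
 *
 *   "schedule"  -> MATCH_FULL        (exact compare)
 *   "sched_*"   -> MATCH_FRONT_ONLY  (prefix compare up to the '*')
 *   "*_lock"    -> MATCH_END_ONLY    (must end the symbol name)
 *   "*spin*"    -> MATCH_MIDDLE_ONLY (substring anywhere)
 *
 * The hypothetical helper below mirrors that classification for a
 * NUL-terminated pattern; it only documents the rules used above.
 */
#if 0
static int example_classify_glob(char *buff)
{
	int i, type = MATCH_FULL;

	for (i = 0; buff[i]; i++) {
		if (buff[i] != '*')
			continue;
		if (!i) {
			type = MATCH_END_ONLY;		/* leading '*' */
			continue;
		}
		if (type == MATCH_END_ONLY)
			type = MATCH_MIDDLE_ONLY;	/* '*' at both ends */
		else
			type = MATCH_FRONT_ONLY;	/* trailing '*' */
		break;	/* the pattern is cut at this '*' */
	}
	return type;
}
#endif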
1053 static ssize_t
1054 ftrace_regex_write(struct file *file, const char __user *ubuf,
1055 size_t cnt, loff_t *ppos, int enable)
1057 struct ftrace_iterator *iter;
1058 char ch;
1059 size_t read = 0;
1060 ssize_t ret;
1062 if (!cnt || cnt < 0)
1063 return 0;
1065 mutex_lock(&ftrace_regex_lock);
1067 if (file->f_mode & FMODE_READ) {
1068 struct seq_file *m = file->private_data;
1069 iter = m->private;
1070 } else
1071 iter = file->private_data;
1073 if (!*ppos) {
1074 iter->flags &= ~FTRACE_ITER_CONT;
1075 iter->buffer_idx = 0;
1078 ret = get_user(ch, ubuf++);
1079 if (ret)
1080 goto out;
1081 read++;
1082 cnt--;
1084 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1085 /* skip white space */
1086 while (cnt && isspace(ch)) {
1087 ret = get_user(ch, ubuf++);
1088 if (ret)
1089 goto out;
1090 read++;
1091 cnt--;
1094 if (isspace(ch)) {
1095 file->f_pos += read;
1096 ret = read;
1097 goto out;
1100 iter->buffer_idx = 0;
1103 while (cnt && !isspace(ch)) {
1104 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1105 iter->buffer[iter->buffer_idx++] = ch;
1106 else {
1107 ret = -EINVAL;
1108 goto out;
1110 ret = get_user(ch, ubuf++);
1111 if (ret)
1112 goto out;
1113 read++;
1114 cnt--;
1117 if (isspace(ch)) {
1118 iter->filtered++;
1119 iter->buffer[iter->buffer_idx] = 0;
1120 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1121 iter->buffer_idx = 0;
1122 } else
1123 iter->flags |= FTRACE_ITER_CONT;
1126 file->f_pos += read;
1128 ret = read;
1129 out:
1130 mutex_unlock(&ftrace_regex_lock);
1132 return ret;
1135 static ssize_t
1136 ftrace_filter_write(struct file *file, const char __user *ubuf,
1137 size_t cnt, loff_t *ppos)
1139 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1142 static ssize_t
1143 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1144 size_t cnt, loff_t *ppos)
1146 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1149 static void
1150 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1152 if (unlikely(ftrace_disabled))
1153 return;
1155 mutex_lock(&ftrace_regex_lock);
1156 if (reset)
1157 ftrace_filter_reset(enable);
1158 if (buf)
1159 ftrace_match(buf, len, enable);
1160 mutex_unlock(&ftrace_regex_lock);
1164 * ftrace_set_filter - set a function to filter on in ftrace
1165 * @buf - the string that holds the function filter text.
1166 * @len - the length of the string.
1167 * @reset - non zero to reset all filters before applying this filter.
1169 * Filters denote which functions should be enabled when tracing is enabled.
1170 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1172 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1174 ftrace_set_regex(buf, len, reset, 1);
1178 * ftrace_set_notrace - set a function to not trace in ftrace
1179 * @buf - the string that holds the function notrace text.
1180 * @len - the length of the string.
1181 * @reset - non zero to reset all filters before applying this filter.
1183 * Notrace Filters denote which functions should not be enabled when tracing
1184 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1185 * for tracing.
1187 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1189 ftrace_set_regex(buf, len, reset, 0);
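/*
 * Illustrative sketch: a hypothetical in-kernel caller, e.g. a tracer's
 * init code, narrowing tracing to scheduler functions and muting a noisy
 * pattern.  The function names are only examples; note the buffers must be
 * writable, since the glob parser may terminate them at a '*'.
 */
#if 0
static void example_setup_filters(void)
{
	unsigned char filter[] = "sched*";
	unsigned char mute[] = "*preempt*";

	/* reset existing filters, then trace only functions matching "sched*" */
	ftrace_set_filter(filter, sizeof(filter) - 1, 1);

	/* additionally, never trace anything matching "*preempt*" */
	ftrace_set_notrace(mute, sizeof(mute) - 1, 1);
}
#endif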
1192 static int
1193 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1195 struct seq_file *m = (struct seq_file *)file->private_data;
1196 struct ftrace_iterator *iter;
1198 mutex_lock(&ftrace_regex_lock);
1199 if (file->f_mode & FMODE_READ) {
1200 iter = m->private;
1202 seq_release(inode, file);
1203 } else
1204 iter = file->private_data;
1206 if (iter->buffer_idx) {
1207 iter->filtered++;
1208 iter->buffer[iter->buffer_idx] = 0;
1209 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1212 mutex_lock(&ftrace_sysctl_lock);
1213 mutex_lock(&ftrace_start_lock);
1214 if (iter->filtered && ftrace_start_up && ftrace_enabled)
1215 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1216 mutex_unlock(&ftrace_start_lock);
1217 mutex_unlock(&ftrace_sysctl_lock);
1219 kfree(iter);
1220 mutex_unlock(&ftrace_regex_lock);
1221 return 0;
1224 static int
1225 ftrace_filter_release(struct inode *inode, struct file *file)
1227 return ftrace_regex_release(inode, file, 1);
1230 static int
1231 ftrace_notrace_release(struct inode *inode, struct file *file)
1233 return ftrace_regex_release(inode, file, 0);
1236 static struct file_operations ftrace_avail_fops = {
1237 .open = ftrace_avail_open,
1238 .read = seq_read,
1239 .llseek = seq_lseek,
1240 .release = ftrace_avail_release,
1243 static struct file_operations ftrace_failures_fops = {
1244 .open = ftrace_failures_open,
1245 .read = seq_read,
1246 .llseek = seq_lseek,
1247 .release = ftrace_avail_release,
1250 static struct file_operations ftrace_filter_fops = {
1251 .open = ftrace_filter_open,
1252 .read = ftrace_regex_read,
1253 .write = ftrace_filter_write,
1254 .llseek = ftrace_regex_lseek,
1255 .release = ftrace_filter_release,
1258 static struct file_operations ftrace_notrace_fops = {
1259 .open = ftrace_notrace_open,
1260 .read = ftrace_regex_read,
1261 .write = ftrace_notrace_write,
1262 .llseek = ftrace_regex_lseek,
1263 .release = ftrace_notrace_release,
1266 static __init int ftrace_init_debugfs(void)
1268 struct dentry *d_tracer;
1269 struct dentry *entry;
1271 d_tracer = tracing_init_dentry();
1273 entry = debugfs_create_file("available_filter_functions", 0444,
1274 d_tracer, NULL, &ftrace_avail_fops);
1275 if (!entry)
1276 pr_warning("Could not create debugfs "
1277 "'available_filter_functions' entry\n");
1279 entry = debugfs_create_file("failures", 0444,
1280 d_tracer, NULL, &ftrace_failures_fops);
1281 if (!entry)
1282 pr_warning("Could not create debugfs 'failures' entry\n");
1284 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1285 NULL, &ftrace_filter_fops);
1286 if (!entry)
1287 pr_warning("Could not create debugfs "
1288 "'set_ftrace_filter' entry\n");
1290 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1291 NULL, &ftrace_notrace_fops);
1292 if (!entry)
1293 pr_warning("Could not create debugfs "
1294 "'set_ftrace_notrace' entry\n");
1296 return 0;
1299 fs_initcall(ftrace_init_debugfs);
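/*
 * Example usage of the files created above from user space (assuming
 * debugfs is mounted at /sys/kernel/debug; the mount point is up to the
 * administrator):
 *
 *   cat /sys/kernel/debug/tracing/available_filter_functions
 *   echo 'sched*'  > /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo '*lock*' >> /sys/kernel/debug/tracing/set_ftrace_notrace
 *   cat /sys/kernel/debug/tracing/failures
 */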
1301 static int ftrace_convert_nops(unsigned long *start,
1302 unsigned long *end)
1304 unsigned long *p;
1305 unsigned long addr;
1306 unsigned long flags;
1308 mutex_lock(&ftrace_start_lock);
1309 p = start;
1310 while (p < end) {
1311 addr = ftrace_call_adjust(*p++);
1312 ftrace_record_ip(addr);
1315 /* disable interrupts to prevent kstop machine */
1316 local_irq_save(flags);
1317 ftrace_update_code();
1318 local_irq_restore(flags);
1319 mutex_unlock(&ftrace_start_lock);
1321 return 0;
1324 void ftrace_init_module(unsigned long *start, unsigned long *end)
1326 if (ftrace_disabled || start == end)
1327 return;
1328 ftrace_convert_nops(start, end);
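/*
 * ftrace_init_module() is expected to be called by the module loader with
 * the boundaries of a module's __mcount_loc section, so call sites in
 * freshly loaded modules get recorded and NOPed just as the core kernel's
 * were in ftrace_init() below.
 */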
1331 extern unsigned long __start_mcount_loc[];
1332 extern unsigned long __stop_mcount_loc[];
1334 void __init ftrace_init(void)
1336 unsigned long count, addr, flags;
1337 int ret;
1339 /* Keep the ftrace pointer to the stub */
1340 addr = (unsigned long)ftrace_stub;
1342 local_irq_save(flags);
1343 ftrace_dyn_arch_init(&addr);
1344 local_irq_restore(flags);
1346 /* ftrace_dyn_arch_init places the return code in addr */
1347 if (addr)
1348 goto failed;
1350 count = __stop_mcount_loc - __start_mcount_loc;
1352 ret = ftrace_dyn_table_alloc(count);
1353 if (ret)
1354 goto failed;
1356 last_ftrace_enabled = ftrace_enabled = 1;
1358 ret = ftrace_convert_nops(__start_mcount_loc,
1359 __stop_mcount_loc);
1361 return;
1362 failed:
1363 ftrace_disabled = 1;
1366 #else
1368 static int __init ftrace_nodyn_init(void)
1370 ftrace_enabled = 1;
1371 return 0;
1373 device_initcall(ftrace_nodyn_init);
1375 # define ftrace_startup() do { } while (0)
1376 # define ftrace_shutdown() do { } while (0)
1377 # define ftrace_startup_sysctl() do { } while (0)
1378 # define ftrace_shutdown_sysctl() do { } while (0)
1379 #endif /* CONFIG_DYNAMIC_FTRACE */
1382 * ftrace_kill - kill ftrace
1384 * This function should be used by panic code. It stops ftrace
1385 * but in a not so nice way. To stop tracing cleanly from a
1386 * non-atomic section, use unregister_ftrace_function() instead.
1388 void ftrace_kill(void)
1390 ftrace_disabled = 1;
1391 ftrace_enabled = 0;
1392 clear_ftrace_function();
1396 * register_ftrace_function - register a function for profiling
1397 * @ops - ops structure that holds the function for profiling.
1399 * Register a function to be called by all functions in the
1400 * kernel.
1402 * Note: @ops->func and all the functions it calls must be labeled
1403 * with "notrace", otherwise it will go into a
1404 * recursive loop.
1406 int register_ftrace_function(struct ftrace_ops *ops)
1408 int ret;
1410 if (unlikely(ftrace_disabled))
1411 return -1;
1413 mutex_lock(&ftrace_sysctl_lock);
1414 ret = __register_ftrace_function(ops);
1415 ftrace_startup();
1416 mutex_unlock(&ftrace_sysctl_lock);
1418 return ret;
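/*
 * Illustrative sketch: a minimal caller of the registration API above.
 * The callback and ops names are hypothetical; the callback (and anything
 * it calls) must be marked notrace to avoid recursing into the tracer.
 */
#if 0
static void notrace example_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip is its call site */
}

static struct ftrace_ops example_ops __read_mostly =
{
	.func = example_trace_func,
};

static int __init example_tracer_init(void)
{
	/* hooks example_trace_func into every mcount call site */
	return register_ftrace_function(&example_ops);
}

static void __exit example_tracer_exit(void)
{
	unregister_ftrace_function(&example_ops);
}
#endif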
1422 * unregister_ftrace_function - unregister a function for profiling.
1423 * @ops - ops structure that holds the function to unregister
1425 * Unregister a function that was added to be called by ftrace profiling.
1427 int unregister_ftrace_function(struct ftrace_ops *ops)
1429 int ret;
1431 mutex_lock(&ftrace_sysctl_lock);
1432 ret = __unregister_ftrace_function(ops);
1433 ftrace_shutdown();
1434 mutex_unlock(&ftrace_sysctl_lock);
1436 return ret;
1440 ftrace_enable_sysctl(struct ctl_table *table, int write,
1441 struct file *file, void __user *buffer, size_t *lenp,
1442 loff_t *ppos)
1444 int ret;
1446 if (unlikely(ftrace_disabled))
1447 return -ENODEV;
1449 mutex_lock(&ftrace_sysctl_lock);
1451 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
1453 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1454 goto out;
1456 last_ftrace_enabled = ftrace_enabled;
1458 if (ftrace_enabled) {
1460 ftrace_startup_sysctl();
1462 /* we are starting ftrace again */
1463 if (ftrace_list != &ftrace_list_end) {
1464 if (ftrace_list->next == &ftrace_list_end)
1465 ftrace_trace_function = ftrace_list->func;
1466 else
1467 ftrace_trace_function = ftrace_list_func;
1470 } else {
1471 /* stopping ftrace calls (just send to ftrace_stub) */
1472 ftrace_trace_function = ftrace_stub;
1474 ftrace_shutdown_sysctl();
1477 out:
1478 mutex_unlock(&ftrace_sysctl_lock);
1479 return ret;
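/*
 * The handler above backs the kernel.ftrace_enabled sysctl.  Example usage
 * through the usual proc interface:
 *
 *   echo 0 > /proc/sys/kernel/ftrace_enabled    # stop calling tracers
 *   echo 1 > /proc/sys/kernel/ftrace_enabled    # resume
 */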
1482 #ifdef CONFIG_FUNCTION_RET_TRACER
1483 trace_function_return_t ftrace_function_return =
1484 (trace_function_return_t)ftrace_stub;
1485 void register_ftrace_return(trace_function_return_t func)
1487 ftrace_function_return = func;
1490 void unregister_ftrace_return(void)
1492 ftrace_function_return = (trace_function_return_t)ftrace_stub;
1494 #endif