ftrace: don't try to __ftrace_replace_code on !FTRACE_FL_CONVERTED rec
[linux-2.6/mini2440.git] / kernel / trace / ftrace.c
blob 08f4a624e31f42fde51d482c1f528a747b307fa7
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/kprobes.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
32 #include <asm/ftrace.h>
34 #include "trace.h"
36 #define FTRACE_WARN_ON(cond) \
37 do { \
38 if (WARN_ON(cond)) \
39 ftrace_kill(); \
40 } while (0)
42 #define FTRACE_WARN_ON_ONCE(cond) \
43 do { \
44 if (WARN_ON_ONCE(cond)) \
45 ftrace_kill(); \
46 } while (0)
48 /* hash bits for specific function selection */
49 #define FTRACE_HASH_BITS 7
50 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
52 /* ftrace_enabled is a method to turn ftrace on or off */
53 int ftrace_enabled __read_mostly;
54 static int last_ftrace_enabled;
56 /* Quick disabling of function tracer. */
57 int function_trace_stop;
60 * ftrace_disabled is set when an anomaly is discovered.
61 * ftrace_disabled is much stronger than ftrace_enabled.
63 static int ftrace_disabled __read_mostly;
65 static DEFINE_MUTEX(ftrace_lock);
67 static struct ftrace_ops ftrace_list_end __read_mostly =
69 .func = ftrace_stub,
72 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
73 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
74 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
75 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
77 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
79 struct ftrace_ops *op = ftrace_list;
81 /* in case someone actually ports this to alpha! */
82 read_barrier_depends();
84 while (op != &ftrace_list_end) {
85 /* silly alpha */
86 read_barrier_depends();
87 op->func(ip, parent_ip);
88 op = op->next;
92 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
94 if (!test_tsk_trace_trace(current))
95 return;
97 ftrace_pid_function(ip, parent_ip);
100 static void set_ftrace_pid_function(ftrace_func_t func)
102 /* do not set ftrace_pid_function to itself! */
103 if (func != ftrace_pid_func)
104 ftrace_pid_function = func;
108 * clear_ftrace_function - reset the ftrace function
110 * This NULLs the ftrace function and in essence stops
111 * tracing. There may be lag
113 void clear_ftrace_function(void)
115 ftrace_trace_function = ftrace_stub;
116 __ftrace_trace_function = ftrace_stub;
117 ftrace_pid_function = ftrace_stub;
120 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
122 * For those archs that do not test function_trace_stop in their
123 * mcount call site, we need to do it from C.
125 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
127 if (function_trace_stop)
128 return;
130 __ftrace_trace_function(ip, parent_ip);
132 #endif
134 static int __register_ftrace_function(struct ftrace_ops *ops)
136 ops->next = ftrace_list;
138 * We are entering ops into the ftrace_list but another
139 * CPU might be walking that list. We need to make sure
140 * the ops->next pointer is valid before another CPU sees
141 * the ops pointer included into the ftrace_list.
143 smp_wmb();
144 ftrace_list = ops;
146 if (ftrace_enabled) {
147 ftrace_func_t func;
149 if (ops->next == &ftrace_list_end)
150 func = ops->func;
151 else
152 func = ftrace_list_func;
154 if (ftrace_pid_trace) {
155 set_ftrace_pid_function(func);
156 func = ftrace_pid_func;
160 * For one func, simply call it directly.
161 * For more than one func, call the chain.
163 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
164 ftrace_trace_function = func;
165 #else
166 __ftrace_trace_function = func;
167 ftrace_trace_function = ftrace_test_stop_func;
168 #endif
171 return 0;
174 static int __unregister_ftrace_function(struct ftrace_ops *ops)
176 struct ftrace_ops **p;
179 * If we are removing the last function, then simply point
180 * to the ftrace_stub.
182 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
183 ftrace_trace_function = ftrace_stub;
184 ftrace_list = &ftrace_list_end;
185 return 0;
188 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
189 if (*p == ops)
190 break;
192 if (*p != ops)
193 return -1;
195 *p = (*p)->next;
197 if (ftrace_enabled) {
198 /* If we only have one func left, then call that directly */
199 if (ftrace_list->next == &ftrace_list_end) {
200 ftrace_func_t func = ftrace_list->func;
202 if (ftrace_pid_trace) {
203 set_ftrace_pid_function(func);
204 func = ftrace_pid_func;
206 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
207 ftrace_trace_function = func;
208 #else
209 __ftrace_trace_function = func;
210 #endif
214 return 0;
217 static void ftrace_update_pid_func(void)
219 ftrace_func_t func;
221 if (ftrace_trace_function == ftrace_stub)
222 return;
224 func = ftrace_trace_function;
226 if (ftrace_pid_trace) {
227 set_ftrace_pid_function(func);
228 func = ftrace_pid_func;
229 } else {
230 if (func == ftrace_pid_func)
231 func = ftrace_pid_function;
234 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
235 ftrace_trace_function = func;
236 #else
237 __ftrace_trace_function = func;
238 #endif
241 /* set when tracing only a pid */
242 struct pid *ftrace_pid_trace;
243 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
245 #ifdef CONFIG_DYNAMIC_FTRACE
247 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
248 # error Dynamic ftrace depends on MCOUNT_RECORD
249 #endif
251 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
253 struct ftrace_func_probe {
254 struct hlist_node node;
255 struct ftrace_probe_ops *ops;
256 unsigned long flags;
257 unsigned long ip;
258 void *data;
259 struct rcu_head rcu;
263 enum {
264 FTRACE_ENABLE_CALLS = (1 << 0),
265 FTRACE_DISABLE_CALLS = (1 << 1),
266 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
267 FTRACE_ENABLE_MCOUNT = (1 << 3),
268 FTRACE_DISABLE_MCOUNT = (1 << 4),
269 FTRACE_START_FUNC_RET = (1 << 5),
270 FTRACE_STOP_FUNC_RET = (1 << 6),
273 static int ftrace_filtered;
275 static LIST_HEAD(ftrace_new_addrs);
277 static DEFINE_MUTEX(ftrace_regex_lock);
279 struct ftrace_page {
280 struct ftrace_page *next;
281 int index;
282 struct dyn_ftrace records[];
285 #define ENTRIES_PER_PAGE \
286 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
288 /* estimate from running different kernels */
289 #define NR_TO_INIT 10000
291 static struct ftrace_page *ftrace_pages_start;
292 static struct ftrace_page *ftrace_pages;
294 static struct dyn_ftrace *ftrace_free_records;
297 * This is a double for. Do not use 'break' to break out of the loop,
298 * you must use a goto.
300 #define do_for_each_ftrace_rec(pg, rec) \
301 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
302 int _____i; \
303 for (_____i = 0; _____i < pg->index; _____i++) { \
304 rec = &pg->records[_____i];
306 #define while_for_each_ftrace_rec() \
307 } \
308 }
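/*
 * Illustrative sketch (not part of the original file; ftrace_find_rec_example
 * is an invented name): the macro pair above expands to two nested for
 * loops, so a plain 'break' only leaves the inner loop.  Callers in this
 * file hold ftrace_lock around the walk and bail out with a goto:
 */
static struct dyn_ftrace *ftrace_find_rec_example(unsigned long ip)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace *found = NULL;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip == ip) {
			found = rec;
			goto out;	/* a 'break' would only exit the inner loop */
		}
	} while_for_each_ftrace_rec();
out:
	return found;
}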
310 #ifdef CONFIG_KPROBES
312 static int frozen_record_count;
314 static inline void freeze_record(struct dyn_ftrace *rec)
316 if (!(rec->flags & FTRACE_FL_FROZEN)) {
317 rec->flags |= FTRACE_FL_FROZEN;
318 frozen_record_count++;
322 static inline void unfreeze_record(struct dyn_ftrace *rec)
324 if (rec->flags & FTRACE_FL_FROZEN) {
325 rec->flags &= ~FTRACE_FL_FROZEN;
326 frozen_record_count--;
330 static inline int record_frozen(struct dyn_ftrace *rec)
332 return rec->flags & FTRACE_FL_FROZEN;
334 #else
335 # define freeze_record(rec) ({ 0; })
336 # define unfreeze_record(rec) ({ 0; })
337 # define record_frozen(rec) ({ 0; })
338 #endif /* CONFIG_KPROBES */
340 static void ftrace_free_rec(struct dyn_ftrace *rec)
342 rec->ip = (unsigned long)ftrace_free_records;
343 ftrace_free_records = rec;
344 rec->flags |= FTRACE_FL_FREE;
347 void ftrace_release(void *start, unsigned long size)
349 struct dyn_ftrace *rec;
350 struct ftrace_page *pg;
351 unsigned long s = (unsigned long)start;
352 unsigned long e = s + size;
354 if (ftrace_disabled || !start)
355 return;
357 mutex_lock(&ftrace_lock);
358 do_for_each_ftrace_rec(pg, rec) {
359 if ((rec->ip >= s) && (rec->ip < e) &&
360 !(rec->flags & FTRACE_FL_FREE))
361 ftrace_free_rec(rec);
362 } while_for_each_ftrace_rec();
363 mutex_unlock(&ftrace_lock);
366 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
368 struct dyn_ftrace *rec;
370 /* First check for freed records */
371 if (ftrace_free_records) {
372 rec = ftrace_free_records;
374 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
375 FTRACE_WARN_ON_ONCE(1);
376 ftrace_free_records = NULL;
377 return NULL;
380 ftrace_free_records = (void *)rec->ip;
381 memset(rec, 0, sizeof(*rec));
382 return rec;
385 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
386 if (!ftrace_pages->next) {
387 /* allocate another page */
388 ftrace_pages->next =
389 (void *)get_zeroed_page(GFP_KERNEL);
390 if (!ftrace_pages->next)
391 return NULL;
393 ftrace_pages = ftrace_pages->next;
396 return &ftrace_pages->records[ftrace_pages->index++];
399 static struct dyn_ftrace *
400 ftrace_record_ip(unsigned long ip)
402 struct dyn_ftrace *rec;
404 if (ftrace_disabled)
405 return NULL;
407 rec = ftrace_alloc_dyn_node(ip);
408 if (!rec)
409 return NULL;
411 rec->ip = ip;
413 list_add(&rec->list, &ftrace_new_addrs);
415 return rec;
418 static void print_ip_ins(const char *fmt, unsigned char *p)
420 int i;
422 printk(KERN_CONT "%s", fmt);
424 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
425 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
428 static void ftrace_bug(int failed, unsigned long ip)
430 switch (failed) {
431 case -EFAULT:
432 FTRACE_WARN_ON_ONCE(1);
433 pr_info("ftrace faulted on modifying ");
434 print_ip_sym(ip);
435 break;
436 case -EINVAL:
437 FTRACE_WARN_ON_ONCE(1);
438 pr_info("ftrace failed to modify ");
439 print_ip_sym(ip);
440 print_ip_ins(" actual: ", (unsigned char *)ip);
441 printk(KERN_CONT "\n");
442 break;
443 case -EPERM:
444 FTRACE_WARN_ON_ONCE(1);
445 pr_info("ftrace faulted on writing ");
446 print_ip_sym(ip);
447 break;
448 default:
449 FTRACE_WARN_ON_ONCE(1);
450 pr_info("ftrace faulted on unknown error ");
451 print_ip_sym(ip);
456 static int
457 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
459 unsigned long ftrace_addr;
460 unsigned long ip, fl;
462 ftrace_addr = (unsigned long)FTRACE_ADDR;
464 ip = rec->ip;
467 * If this record is not to be traced and
468 * it is not enabled then do nothing.
470 * If this record is not to be traced and
471 * it is enabled then disable it.
474 if (rec->flags & FTRACE_FL_NOTRACE) {
475 if (rec->flags & FTRACE_FL_ENABLED)
476 rec->flags &= ~FTRACE_FL_ENABLED;
477 else
478 return 0;
480 } else if (ftrace_filtered && enable) {
482 * Filtering is on:
485 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
487 /* Record is filtered and enabled, do nothing */
488 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
489 return 0;
491 /* Record is not filtered or enabled, do nothing */
492 if (!fl)
493 return 0;
495 /* Record is not filtered but enabled, disable it */
496 if (fl == FTRACE_FL_ENABLED)
497 rec->flags &= ~FTRACE_FL_ENABLED;
498 else
499 /* Otherwise record is filtered but not enabled, enable it */
500 rec->flags |= FTRACE_FL_ENABLED;
501 } else {
502 /* Disable or not filtered */
504 if (enable) {
505 /* if record is enabled, do nothing */
506 if (rec->flags & FTRACE_FL_ENABLED)
507 return 0;
509 rec->flags |= FTRACE_FL_ENABLED;
511 } else {
513 /* if record is not enabled, do nothing */
514 if (!(rec->flags & FTRACE_FL_ENABLED))
515 return 0;
517 rec->flags &= ~FTRACE_FL_ENABLED;
521 if (rec->flags & FTRACE_FL_ENABLED)
522 return ftrace_make_call(rec, ftrace_addr);
523 else
524 return ftrace_make_nop(NULL, rec, ftrace_addr);
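/*
 * Reading aid (added here, derived from the logic above) for the
 * "filtering is on and enable" case, where
 * fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED):
 *
 *	FILTER	ENABLED		action
 *	  1	   1		already patched to call ftrace, do nothing
 *	  0	   0		not wanted and not patched, do nothing
 *	  0	   1		no longer filtered: clear ENABLED, patch to nop
 *	  1	   0		newly filtered: set ENABLED, patch in the call
 */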
527 static void ftrace_replace_code(int enable)
529 struct dyn_ftrace *rec;
530 struct ftrace_page *pg;
531 int failed;
533 do_for_each_ftrace_rec(pg, rec) {
535 * Skip over free records, records that have
536 * failed, and records not yet converted.
538 if (rec->flags & FTRACE_FL_FREE ||
539 rec->flags & FTRACE_FL_FAILED ||
540 !(rec->flags & FTRACE_FL_CONVERTED))
541 continue;
543 /* ignore updates to this record's mcount site */
544 if (get_kprobe((void *)rec->ip)) {
545 freeze_record(rec);
546 continue;
547 } else {
548 unfreeze_record(rec);
551 failed = __ftrace_replace_code(rec, enable);
552 if (failed) {
553 rec->flags |= FTRACE_FL_FAILED;
554 if ((system_state == SYSTEM_BOOTING) ||
555 !core_kernel_text(rec->ip)) {
556 ftrace_free_rec(rec);
557 } else {
558 ftrace_bug(failed, rec->ip);
559 /* Stop processing */
560 return;
563 } while_for_each_ftrace_rec();
566 static int
567 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
569 unsigned long ip;
570 int ret;
572 ip = rec->ip;
574 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
575 if (ret) {
576 ftrace_bug(ret, ip);
577 rec->flags |= FTRACE_FL_FAILED;
578 return 0;
580 return 1;
584 * archs can override this function if they must do something
585 * before the modifying code is performed.
587 int __weak ftrace_arch_code_modify_prepare(void)
589 return 0;
593 * archs can override this function if they must do something
594 * after the modifying code is performed.
596 int __weak ftrace_arch_code_modify_post_process(void)
598 return 0;
601 static int __ftrace_modify_code(void *data)
603 int *command = data;
605 if (*command & FTRACE_ENABLE_CALLS)
606 ftrace_replace_code(1);
607 else if (*command & FTRACE_DISABLE_CALLS)
608 ftrace_replace_code(0);
610 if (*command & FTRACE_UPDATE_TRACE_FUNC)
611 ftrace_update_ftrace_func(ftrace_trace_function);
613 if (*command & FTRACE_START_FUNC_RET)
614 ftrace_enable_ftrace_graph_caller();
615 else if (*command & FTRACE_STOP_FUNC_RET)
616 ftrace_disable_ftrace_graph_caller();
618 return 0;
621 static void ftrace_run_update_code(int command)
623 int ret;
625 ret = ftrace_arch_code_modify_prepare();
626 FTRACE_WARN_ON(ret);
627 if (ret)
628 return;
630 stop_machine(__ftrace_modify_code, &command, NULL);
632 ret = ftrace_arch_code_modify_post_process();
633 FTRACE_WARN_ON(ret);
636 static ftrace_func_t saved_ftrace_func;
637 static int ftrace_start_up;
639 static void ftrace_startup_enable(int command)
641 if (saved_ftrace_func != ftrace_trace_function) {
642 saved_ftrace_func = ftrace_trace_function;
643 command |= FTRACE_UPDATE_TRACE_FUNC;
646 if (!command || !ftrace_enabled)
647 return;
649 ftrace_run_update_code(command);
652 static void ftrace_startup(int command)
654 if (unlikely(ftrace_disabled))
655 return;
657 ftrace_start_up++;
658 command |= FTRACE_ENABLE_CALLS;
660 ftrace_startup_enable(command);
663 static void ftrace_shutdown(int command)
665 if (unlikely(ftrace_disabled))
666 return;
668 ftrace_start_up--;
669 if (!ftrace_start_up)
670 command |= FTRACE_DISABLE_CALLS;
672 if (saved_ftrace_func != ftrace_trace_function) {
673 saved_ftrace_func = ftrace_trace_function;
674 command |= FTRACE_UPDATE_TRACE_FUNC;
677 if (!command || !ftrace_enabled)
678 return;
680 ftrace_run_update_code(command);
683 static void ftrace_startup_sysctl(void)
685 int command = FTRACE_ENABLE_MCOUNT;
687 if (unlikely(ftrace_disabled))
688 return;
690 /* Force update next time */
691 saved_ftrace_func = NULL;
692 /* ftrace_start_up is true if we want ftrace running */
693 if (ftrace_start_up)
694 command |= FTRACE_ENABLE_CALLS;
696 ftrace_run_update_code(command);
699 static void ftrace_shutdown_sysctl(void)
701 int command = FTRACE_DISABLE_MCOUNT;
703 if (unlikely(ftrace_disabled))
704 return;
706 /* ftrace_start_up is true if ftrace is running */
707 if (ftrace_start_up)
708 command |= FTRACE_DISABLE_CALLS;
710 ftrace_run_update_code(command);
713 static cycle_t ftrace_update_time;
714 static unsigned long ftrace_update_cnt;
715 unsigned long ftrace_update_tot_cnt;
717 static int ftrace_update_code(struct module *mod)
719 struct dyn_ftrace *p, *t;
720 cycle_t start, stop;
722 start = ftrace_now(raw_smp_processor_id());
723 ftrace_update_cnt = 0;
725 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
727 /* If something went wrong, bail without enabling anything */
728 if (unlikely(ftrace_disabled))
729 return -1;
731 list_del_init(&p->list);
733 /* convert record (i.e., patch mcount-call with NOP) */
734 if (ftrace_code_disable(mod, p)) {
735 p->flags |= FTRACE_FL_CONVERTED;
736 ftrace_update_cnt++;
737 } else
738 ftrace_free_rec(p);
741 stop = ftrace_now(raw_smp_processor_id());
742 ftrace_update_time = stop - start;
743 ftrace_update_tot_cnt += ftrace_update_cnt;
745 return 0;
748 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
750 struct ftrace_page *pg;
751 int cnt;
752 int i;
754 /* allocate a few pages */
755 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
756 if (!ftrace_pages_start)
757 return -1;
760 * Allocate a few more pages.
762 * TODO: have some parser search vmlinux before
763 * final linking to find all calls to ftrace.
764 * Then we can:
765 * a) know how many pages to allocate.
766 * and/or
767 * b) set up the table then.
769 * The dynamic code is still necessary for
770 * modules.
773 pg = ftrace_pages = ftrace_pages_start;
775 cnt = num_to_init / ENTRIES_PER_PAGE;
776 pr_info("ftrace: allocating %ld entries in %d pages\n",
777 num_to_init, cnt + 1);
779 for (i = 0; i < cnt; i++) {
780 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
782 /* If we fail, we'll try later anyway */
783 if (!pg->next)
784 break;
786 pg = pg->next;
789 return 0;
792 enum {
793 FTRACE_ITER_FILTER = (1 << 0),
794 FTRACE_ITER_CONT = (1 << 1),
795 FTRACE_ITER_NOTRACE = (1 << 2),
796 FTRACE_ITER_FAILURES = (1 << 3),
797 FTRACE_ITER_PRINTALL = (1 << 4),
798 FTRACE_ITER_HASH = (1 << 5),
801 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
803 struct ftrace_iterator {
804 struct ftrace_page *pg;
805 int hidx;
806 int idx;
807 unsigned flags;
808 unsigned char buffer[FTRACE_BUFF_MAX+1];
809 unsigned buffer_idx;
810 unsigned filtered;
813 static void *
814 t_hash_next(struct seq_file *m, void *v, loff_t *pos)
816 struct ftrace_iterator *iter = m->private;
817 struct hlist_node *hnd = v;
818 struct hlist_head *hhd;
820 WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
822 (*pos)++;
824 retry:
825 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
826 return NULL;
828 hhd = &ftrace_func_hash[iter->hidx];
830 if (hlist_empty(hhd)) {
831 iter->hidx++;
832 hnd = NULL;
833 goto retry;
836 if (!hnd)
837 hnd = hhd->first;
838 else {
839 hnd = hnd->next;
840 if (!hnd) {
841 iter->hidx++;
842 goto retry;
846 return hnd;
849 static void *t_hash_start(struct seq_file *m, loff_t *pos)
851 struct ftrace_iterator *iter = m->private;
852 void *p = NULL;
854 iter->flags |= FTRACE_ITER_HASH;
856 return t_hash_next(m, p, pos);
859 static int t_hash_show(struct seq_file *m, void *v)
861 struct ftrace_func_probe *rec;
862 struct hlist_node *hnd = v;
863 char str[KSYM_SYMBOL_LEN];
865 rec = hlist_entry(hnd, struct ftrace_func_probe, node);
867 if (rec->ops->print)
868 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
870 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
871 seq_printf(m, "%s:", str);
873 kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
874 seq_printf(m, "%s", str);
876 if (rec->data)
877 seq_printf(m, ":%p", rec->data);
878 seq_putc(m, '\n');
880 return 0;
883 static void *
884 t_next(struct seq_file *m, void *v, loff_t *pos)
886 struct ftrace_iterator *iter = m->private;
887 struct dyn_ftrace *rec = NULL;
889 if (iter->flags & FTRACE_ITER_HASH)
890 return t_hash_next(m, v, pos);
892 (*pos)++;
894 if (iter->flags & FTRACE_ITER_PRINTALL)
895 return NULL;
897 retry:
898 if (iter->idx >= iter->pg->index) {
899 if (iter->pg->next) {
900 iter->pg = iter->pg->next;
901 iter->idx = 0;
902 goto retry;
903 } else {
904 iter->idx = -1;
906 } else {
907 rec = &iter->pg->records[iter->idx++];
908 if ((rec->flags & FTRACE_FL_FREE) ||
910 (!(iter->flags & FTRACE_ITER_FAILURES) &&
911 (rec->flags & FTRACE_FL_FAILED)) ||
913 ((iter->flags & FTRACE_ITER_FAILURES) &&
914 !(rec->flags & FTRACE_FL_FAILED)) ||
916 ((iter->flags & FTRACE_ITER_FILTER) &&
917 !(rec->flags & FTRACE_FL_FILTER)) ||
919 ((iter->flags & FTRACE_ITER_NOTRACE) &&
920 !(rec->flags & FTRACE_FL_NOTRACE))) {
921 rec = NULL;
922 goto retry;
926 return rec;
929 static void *t_start(struct seq_file *m, loff_t *pos)
931 struct ftrace_iterator *iter = m->private;
932 void *p = NULL;
934 mutex_lock(&ftrace_lock);
936 * For set_ftrace_filter reading, if we have the filter
937 * off, we can short cut and just print out that all
938 * functions are enabled.
940 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
941 if (*pos > 0)
942 return t_hash_start(m, pos);
943 iter->flags |= FTRACE_ITER_PRINTALL;
944 (*pos)++;
945 return iter;
948 if (iter->flags & FTRACE_ITER_HASH)
949 return t_hash_start(m, pos);
951 if (*pos > 0) {
952 if (iter->idx < 0)
953 return p;
954 (*pos)--;
955 iter->idx--;
958 p = t_next(m, p, pos);
960 if (!p)
961 return t_hash_start(m, pos);
963 return p;
966 static void t_stop(struct seq_file *m, void *p)
968 mutex_unlock(&ftrace_lock);
971 static int t_show(struct seq_file *m, void *v)
973 struct ftrace_iterator *iter = m->private;
974 struct dyn_ftrace *rec = v;
975 char str[KSYM_SYMBOL_LEN];
977 if (iter->flags & FTRACE_ITER_HASH)
978 return t_hash_show(m, v);
980 if (iter->flags & FTRACE_ITER_PRINTALL) {
981 seq_printf(m, "#### all functions enabled ####\n");
982 return 0;
985 if (!rec)
986 return 0;
988 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
990 seq_printf(m, "%s\n", str);
992 return 0;
995 static struct seq_operations show_ftrace_seq_ops = {
996 .start = t_start,
997 .next = t_next,
998 .stop = t_stop,
999 .show = t_show,
1002 static int
1003 ftrace_avail_open(struct inode *inode, struct file *file)
1005 struct ftrace_iterator *iter;
1006 int ret;
1008 if (unlikely(ftrace_disabled))
1009 return -ENODEV;
1011 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1012 if (!iter)
1013 return -ENOMEM;
1015 iter->pg = ftrace_pages_start;
1017 ret = seq_open(file, &show_ftrace_seq_ops);
1018 if (!ret) {
1019 struct seq_file *m = file->private_data;
1021 m->private = iter;
1022 } else {
1023 kfree(iter);
1026 return ret;
1029 int ftrace_avail_release(struct inode *inode, struct file *file)
1031 struct seq_file *m = (struct seq_file *)file->private_data;
1032 struct ftrace_iterator *iter = m->private;
1034 seq_release(inode, file);
1035 kfree(iter);
1037 return 0;
1040 static int
1041 ftrace_failures_open(struct inode *inode, struct file *file)
1043 int ret;
1044 struct seq_file *m;
1045 struct ftrace_iterator *iter;
1047 ret = ftrace_avail_open(inode, file);
1048 if (!ret) {
1049 m = (struct seq_file *)file->private_data;
1050 iter = (struct ftrace_iterator *)m->private;
1051 iter->flags = FTRACE_ITER_FAILURES;
1054 return ret;
1058 static void ftrace_filter_reset(int enable)
1060 struct ftrace_page *pg;
1061 struct dyn_ftrace *rec;
1062 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1064 mutex_lock(&ftrace_lock);
1065 if (enable)
1066 ftrace_filtered = 0;
1067 do_for_each_ftrace_rec(pg, rec) {
1068 if (rec->flags & FTRACE_FL_FAILED)
1069 continue;
1070 rec->flags &= ~type;
1071 } while_for_each_ftrace_rec();
1072 mutex_unlock(&ftrace_lock);
1075 static int
1076 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1078 struct ftrace_iterator *iter;
1079 int ret = 0;
1081 if (unlikely(ftrace_disabled))
1082 return -ENODEV;
1084 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1085 if (!iter)
1086 return -ENOMEM;
1088 mutex_lock(&ftrace_regex_lock);
1089 if ((file->f_mode & FMODE_WRITE) &&
1090 !(file->f_flags & O_APPEND))
1091 ftrace_filter_reset(enable);
1093 if (file->f_mode & FMODE_READ) {
1094 iter->pg = ftrace_pages_start;
1095 iter->flags = enable ? FTRACE_ITER_FILTER :
1096 FTRACE_ITER_NOTRACE;
1098 ret = seq_open(file, &show_ftrace_seq_ops);
1099 if (!ret) {
1100 struct seq_file *m = file->private_data;
1101 m->private = iter;
1102 } else
1103 kfree(iter);
1104 } else
1105 file->private_data = iter;
1106 mutex_unlock(&ftrace_regex_lock);
1108 return ret;
1111 static int
1112 ftrace_filter_open(struct inode *inode, struct file *file)
1114 return ftrace_regex_open(inode, file, 1);
1117 static int
1118 ftrace_notrace_open(struct inode *inode, struct file *file)
1120 return ftrace_regex_open(inode, file, 0);
1123 static ssize_t
1124 ftrace_regex_read(struct file *file, char __user *ubuf,
1125 size_t cnt, loff_t *ppos)
1127 if (file->f_mode & FMODE_READ)
1128 return seq_read(file, ubuf, cnt, ppos);
1129 else
1130 return -EPERM;
1133 static loff_t
1134 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1136 loff_t ret;
1138 if (file->f_mode & FMODE_READ)
1139 ret = seq_lseek(file, offset, origin);
1140 else
1141 file->f_pos = ret = 1;
1143 return ret;
1146 enum {
1147 MATCH_FULL,
1148 MATCH_FRONT_ONLY,
1149 MATCH_MIDDLE_ONLY,
1150 MATCH_END_ONLY,
1154 * (static function - no need for kernel doc)
1156 * Pass in a buffer containing a glob and this function will
1157 * set search to point to the search part of the buffer and
1158 * return the type of search it is (see enum above).
1159 * This does modify buff.
1161 * Returns enum type.
1162 * search returns the pointer to use for comparison.
1163 * not returns 1 if buff started with a '!'
1164 * 0 otherwise.
1166 static int
1167 ftrace_setup_glob(char *buff, int len, char **search, int *not)
1169 int type = MATCH_FULL;
1170 int i;
1172 if (buff[0] == '!') {
1173 *not = 1;
1174 buff++;
1175 len--;
1176 } else
1177 *not = 0;
1179 *search = buff;
1181 for (i = 0; i < len; i++) {
1182 if (buff[i] == '*') {
1183 if (!i) {
1184 *search = buff + 1;
1185 type = MATCH_END_ONLY;
1186 } else {
1187 if (type == MATCH_END_ONLY)
1188 type = MATCH_MIDDLE_ONLY;
1189 else
1190 type = MATCH_FRONT_ONLY;
1191 buff[i] = 0;
1192 break;
1197 return type;
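/*
 * Worked examples for the glob classification above (illustrative only;
 * 'not' is the by-reference flag, 'search' the resulting needle):
 *
 *	buff		type			search		not
 *	"sys_open"	MATCH_FULL		"sys_open"	0
 *	"sys_*"		MATCH_FRONT_ONLY	"sys_"		0
 *	"*_read"	MATCH_END_ONLY		"_read"		0
 *	"*spin*"	MATCH_MIDDLE_ONLY	"spin"		0
 *	"!sched*"	MATCH_FRONT_ONLY	"sched"		1
 */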
1200 static int ftrace_match(char *str, char *regex, int len, int type)
1202 int matched = 0;
1203 char *ptr;
1205 switch (type) {
1206 case MATCH_FULL:
1207 if (strcmp(str, regex) == 0)
1208 matched = 1;
1209 break;
1210 case MATCH_FRONT_ONLY:
1211 if (strncmp(str, regex, len) == 0)
1212 matched = 1;
1213 break;
1214 case MATCH_MIDDLE_ONLY:
1215 if (strstr(str, regex))
1216 matched = 1;
1217 break;
1218 case MATCH_END_ONLY:
1219 ptr = strstr(str, regex);
1220 if (ptr && (ptr[len] == 0))
1221 matched = 1;
1222 break;
1225 return matched;
1228 static int
1229 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1231 char str[KSYM_SYMBOL_LEN];
1233 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1234 return ftrace_match(str, regex, len, type);
1237 static void ftrace_match_records(char *buff, int len, int enable)
1239 unsigned int search_len;
1240 struct ftrace_page *pg;
1241 struct dyn_ftrace *rec;
1242 unsigned long flag;
1243 char *search;
1244 int type;
1245 int not;
1247 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1248 type = ftrace_setup_glob(buff, len, &search, &not);
1250 search_len = strlen(search);
1252 mutex_lock(&ftrace_lock);
1253 do_for_each_ftrace_rec(pg, rec) {
1255 if (rec->flags & FTRACE_FL_FAILED)
1256 continue;
1258 if (ftrace_match_record(rec, search, search_len, type)) {
1259 if (not)
1260 rec->flags &= ~flag;
1261 else
1262 rec->flags |= flag;
1265 * Only enable filtering if we have a function that
1266 * is filtered on.
1268 if (enable && (rec->flags & FTRACE_FL_FILTER))
1269 ftrace_filtered = 1;
1270 } while_for_each_ftrace_rec();
1271 mutex_unlock(&ftrace_lock);
1274 static int
1275 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1276 char *regex, int len, int type)
1278 char str[KSYM_SYMBOL_LEN];
1279 char *modname;
1281 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1283 if (!modname || strcmp(modname, mod))
1284 return 0;
1286 /* blank search means to match all funcs in the mod */
1287 if (len)
1288 return ftrace_match(str, regex, len, type);
1289 else
1290 return 1;
1293 static void ftrace_match_module_records(char *buff, char *mod, int enable)
1295 unsigned search_len = 0;
1296 struct ftrace_page *pg;
1297 struct dyn_ftrace *rec;
1298 int type = MATCH_FULL;
1299 char *search = buff;
1300 unsigned long flag;
1301 int not = 0;
1303 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1305 /* blank or '*' mean the same */
1306 if (strcmp(buff, "*") == 0)
1307 buff[0] = 0;
1309 /* handle the case of 'don't filter this module' */
1310 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1311 buff[0] = 0;
1312 not = 1;
1315 if (strlen(buff)) {
1316 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1317 search_len = strlen(search);
1320 mutex_lock(&ftrace_lock);
1321 do_for_each_ftrace_rec(pg, rec) {
1323 if (rec->flags & FTRACE_FL_FAILED)
1324 continue;
1326 if (ftrace_match_module_record(rec, mod,
1327 search, search_len, type)) {
1328 if (not)
1329 rec->flags &= ~flag;
1330 else
1331 rec->flags |= flag;
1333 if (enable && (rec->flags & FTRACE_FL_FILTER))
1334 ftrace_filtered = 1;
1336 } while_for_each_ftrace_rec();
1337 mutex_unlock(&ftrace_lock);
1341 * We register the module command as a template to show others how
1342 * to register a command as well.
1345 static int
1346 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1348 char *mod;
1351 * cmd == 'mod' because we only registered this func
1352 * for the 'mod' ftrace_func_command.
1353 * But if you register one func with multiple commands,
1354 * you can tell which command was used by the cmd
1355 * parameter.
1358 /* we must have a module name */
1359 if (!param)
1360 return -EINVAL;
1362 mod = strsep(&param, ":");
1363 if (!strlen(mod))
1364 return -EINVAL;
1366 ftrace_match_module_records(func, mod, enable);
1367 return 0;
1370 static struct ftrace_func_command ftrace_mod_cmd = {
1371 .name = "mod",
1372 .func = ftrace_mod_callback,
1375 static int __init ftrace_mod_cmd_init(void)
1377 return register_ftrace_command(&ftrace_mod_cmd);
1379 device_initcall(ftrace_mod_cmd_init);
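/*
 * Hypothetical sketch (everything named *foo* below is invented): a new
 * command wired into set_ftrace_filter as "<func>:foo:<param>", following
 * the 'mod' template above.
 */
static int foo_callback(char *func, char *cmd, char *param, int enable)
{
	/* cmd is "foo" here; act on 'func' using 'param' */
	return 0;
}

static struct ftrace_func_command ftrace_foo_cmd = {
	.name		= "foo",
	.func		= foo_callback,
};

static int __init ftrace_foo_cmd_init(void)
{
	return register_ftrace_command(&ftrace_foo_cmd);
}
device_initcall(ftrace_foo_cmd_init);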
1381 static void
1382 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1384 struct ftrace_func_probe *entry;
1385 struct hlist_head *hhd;
1386 struct hlist_node *n;
1387 unsigned long key;
1388 int resched;
1390 key = hash_long(ip, FTRACE_HASH_BITS);
1392 hhd = &ftrace_func_hash[key];
1394 if (hlist_empty(hhd))
1395 return;
1398 * Disable preemption for these calls to prevent a RCU grace
1399 * period. This syncs the hash iteration and freeing of items
1400 * on the hash. rcu_read_lock is too dangerous here.
1402 resched = ftrace_preempt_disable();
1403 hlist_for_each_entry_rcu(entry, n, hhd, node) {
1404 if (entry->ip == ip)
1405 entry->ops->func(ip, parent_ip, &entry->data);
1407 ftrace_preempt_enable(resched);
1410 static struct ftrace_ops trace_probe_ops __read_mostly =
1412 .func = function_trace_probe_call,
1415 static int ftrace_probe_registered;
1417 static void __enable_ftrace_function_probe(void)
1419 int i;
1421 if (ftrace_probe_registered)
1422 return;
1424 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1425 struct hlist_head *hhd = &ftrace_func_hash[i];
1426 if (hhd->first)
1427 break;
1429 /* Nothing registered? */
1430 if (i == FTRACE_FUNC_HASHSIZE)
1431 return;
1433 __register_ftrace_function(&trace_probe_ops);
1434 ftrace_startup(0);
1435 ftrace_probe_registered = 1;
1438 static void __disable_ftrace_function_probe(void)
1440 int i;
1442 if (!ftrace_probe_registered)
1443 return;
1445 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1446 struct hlist_head *hhd = &ftrace_func_hash[i];
1447 if (hhd->first)
1448 return;
1451 /* no more funcs left */
1452 __unregister_ftrace_function(&trace_probe_ops);
1453 ftrace_shutdown(0);
1454 ftrace_probe_registered = 0;
1458 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1460 struct ftrace_func_probe *entry =
1461 container_of(rhp, struct ftrace_func_probe, rcu);
1463 if (entry->ops->free)
1464 entry->ops->free(&entry->data);
1465 kfree(entry);
1470 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1471 void *data)
1473 struct ftrace_func_probe *entry;
1474 struct ftrace_page *pg;
1475 struct dyn_ftrace *rec;
1476 int type, len, not;
1477 unsigned long key;
1478 int count = 0;
1479 char *search;
1481 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1482 len = strlen(search);
1484 /* we do not support '!' for function probes */
1485 if (WARN_ON(not))
1486 return -EINVAL;
1488 mutex_lock(&ftrace_lock);
1489 do_for_each_ftrace_rec(pg, rec) {
1491 if (rec->flags & FTRACE_FL_FAILED)
1492 continue;
1494 if (!ftrace_match_record(rec, search, len, type))
1495 continue;
1497 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1498 if (!entry) {
1499 /* If we did not process any, then return error */
1500 if (!count)
1501 count = -ENOMEM;
1502 goto out_unlock;
1505 count++;
1507 entry->data = data;
1510 * The caller might want to do something special
1511 * for each function we find. We call the callback
1512 * to give the caller an opportunity to do so.
1514 if (ops->callback) {
1515 if (ops->callback(rec->ip, &entry->data) < 0) {
1516 /* caller does not like this func */
1517 kfree(entry);
1518 continue;
1522 entry->ops = ops;
1523 entry->ip = rec->ip;
1525 key = hash_long(entry->ip, FTRACE_HASH_BITS);
1526 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
1528 } while_for_each_ftrace_rec();
1529 __enable_ftrace_function_probe();
1531 out_unlock:
1532 mutex_unlock(&ftrace_lock);
1534 return count;
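/*
 * Hypothetical usage sketch (my_probe_* is invented): a probe that fires
 * for every matched function, based on how function_trace_probe_call()
 * above invokes entry->ops->func(ip, parent_ip, &entry->data).  Note the
 * glob buffer must be writable, since ftrace_setup_glob() modifies it.
 */
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* runs (preemption disabled) each time a matched function is hit */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func	= my_probe_func,
};

static void my_probe_attach(void)
{
	char glob[] = "schedule*";

	register_ftrace_function_probe(glob, &my_probe_ops, NULL);
	/*
	 * Detach with unregister_ftrace_function_probe_func(), passing the
	 * pattern again in a fresh writable buffer.
	 */
}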
1537 enum {
1538 PROBE_TEST_FUNC = 1,
1539 PROBE_TEST_DATA = 2
1542 static void
1543 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1544 void *data, int flags)
1546 struct ftrace_func_probe *entry;
1547 struct hlist_node *n, *tmp;
1548 char str[KSYM_SYMBOL_LEN];
1549 int type = MATCH_FULL;
1550 int i, len = 0;
1551 char *search;
1553 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
1554 glob = NULL;
1555 else {
1556 int not;
1558 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1559 len = strlen(search);
1561 /* we do not support '!' for function probes */
1562 if (WARN_ON(not))
1563 return;
1566 mutex_lock(&ftrace_lock);
1567 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1568 struct hlist_head *hhd = &ftrace_func_hash[i];
1570 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
1572 /* break up if statements for readability */
1573 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
1574 continue;
1576 if ((flags & PROBE_TEST_DATA) && entry->data != data)
1577 continue;
1579 /* do this last, since it is the most expensive */
1580 if (glob) {
1581 kallsyms_lookup(entry->ip, NULL, NULL,
1582 NULL, str);
1583 if (!ftrace_match(str, glob, len, type))
1584 continue;
1587 hlist_del(&entry->node);
1588 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
1591 __disable_ftrace_function_probe();
1592 mutex_unlock(&ftrace_lock);
1595 void
1596 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1597 void *data)
1599 __unregister_ftrace_function_probe(glob, ops, data,
1600 PROBE_TEST_FUNC | PROBE_TEST_DATA);
1603 void
1604 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
1606 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
1609 void unregister_ftrace_function_probe_all(char *glob)
1611 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
1614 static LIST_HEAD(ftrace_commands);
1615 static DEFINE_MUTEX(ftrace_cmd_mutex);
1617 int register_ftrace_command(struct ftrace_func_command *cmd)
1619 struct ftrace_func_command *p;
1620 int ret = 0;
1622 mutex_lock(&ftrace_cmd_mutex);
1623 list_for_each_entry(p, &ftrace_commands, list) {
1624 if (strcmp(cmd->name, p->name) == 0) {
1625 ret = -EBUSY;
1626 goto out_unlock;
1629 list_add(&cmd->list, &ftrace_commands);
1630 out_unlock:
1631 mutex_unlock(&ftrace_cmd_mutex);
1633 return ret;
1636 int unregister_ftrace_command(struct ftrace_func_command *cmd)
1638 struct ftrace_func_command *p, *n;
1639 int ret = -ENODEV;
1641 mutex_lock(&ftrace_cmd_mutex);
1642 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
1643 if (strcmp(cmd->name, p->name) == 0) {
1644 ret = 0;
1645 list_del_init(&p->list);
1646 goto out_unlock;
1649 out_unlock:
1650 mutex_unlock(&ftrace_cmd_mutex);
1652 return ret;
1655 static int ftrace_process_regex(char *buff, int len, int enable)
1657 char *func, *command, *next = buff;
1658 struct ftrace_func_command *p;
1659 int ret = -EINVAL;
1661 func = strsep(&next, ":");
1663 if (!next) {
1664 ftrace_match_records(func, len, enable);
1665 return 0;
1668 /* command found */
1670 command = strsep(&next, ":");
1672 mutex_lock(&ftrace_cmd_mutex);
1673 list_for_each_entry(p, &ftrace_commands, list) {
1674 if (strcmp(p->name, command) == 0) {
1675 ret = p->func(func, command, next, enable);
1676 goto out_unlock;
1679 out_unlock:
1680 mutex_unlock(&ftrace_cmd_mutex);
1682 return ret;
1685 static ssize_t
1686 ftrace_regex_write(struct file *file, const char __user *ubuf,
1687 size_t cnt, loff_t *ppos, int enable)
1689 struct ftrace_iterator *iter;
1690 char ch;
1691 size_t read = 0;
1692 ssize_t ret;
1694 if (!cnt || cnt < 0)
1695 return 0;
1697 mutex_lock(&ftrace_regex_lock);
1699 if (file->f_mode & FMODE_READ) {
1700 struct seq_file *m = file->private_data;
1701 iter = m->private;
1702 } else
1703 iter = file->private_data;
1705 if (!*ppos) {
1706 iter->flags &= ~FTRACE_ITER_CONT;
1707 iter->buffer_idx = 0;
1710 ret = get_user(ch, ubuf++);
1711 if (ret)
1712 goto out;
1713 read++;
1714 cnt--;
1716 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1717 /* skip white space */
1718 while (cnt && isspace(ch)) {
1719 ret = get_user(ch, ubuf++);
1720 if (ret)
1721 goto out;
1722 read++;
1723 cnt--;
1726 if (isspace(ch)) {
1727 file->f_pos += read;
1728 ret = read;
1729 goto out;
1732 iter->buffer_idx = 0;
1735 while (cnt && !isspace(ch)) {
1736 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1737 iter->buffer[iter->buffer_idx++] = ch;
1738 else {
1739 ret = -EINVAL;
1740 goto out;
1742 ret = get_user(ch, ubuf++);
1743 if (ret)
1744 goto out;
1745 read++;
1746 cnt--;
1749 if (isspace(ch)) {
1750 iter->filtered++;
1751 iter->buffer[iter->buffer_idx] = 0;
1752 ret = ftrace_process_regex(iter->buffer,
1753 iter->buffer_idx, enable);
1754 if (ret)
1755 goto out;
1756 iter->buffer_idx = 0;
1757 } else
1758 iter->flags |= FTRACE_ITER_CONT;
1761 file->f_pos += read;
1763 ret = read;
1764 out:
1765 mutex_unlock(&ftrace_regex_lock);
1767 return ret;
1770 static ssize_t
1771 ftrace_filter_write(struct file *file, const char __user *ubuf,
1772 size_t cnt, loff_t *ppos)
1774 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1777 static ssize_t
1778 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1779 size_t cnt, loff_t *ppos)
1781 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1784 static void
1785 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1787 if (unlikely(ftrace_disabled))
1788 return;
1790 mutex_lock(&ftrace_regex_lock);
1791 if (reset)
1792 ftrace_filter_reset(enable);
1793 if (buf)
1794 ftrace_match_records(buf, len, enable);
1795 mutex_unlock(&ftrace_regex_lock);
1799 * ftrace_set_filter - set a function to filter on in ftrace
1800 * @buf - the string that holds the function filter text.
1801 * @len - the length of the string.
1802 * @reset - non zero to reset all filters before applying this filter.
1804 * Filters denote which functions should be enabled when tracing is enabled.
1805 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1807 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1809 ftrace_set_regex(buf, len, reset, 1);
1813 * ftrace_set_notrace - set a function to not trace in ftrace
1814 * @buf - the string that holds the function notrace text.
1815 * @len - the length of the string.
1816 * @reset - non zero to reset all filters before applying this filter.
1818 * Notrace Filters denote which functions should not be enabled when tracing
1819 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1820 * for tracing.
1822 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1824 ftrace_set_regex(buf, len, reset, 0);
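/*
 * Hedged usage sketch (example_limit_tracing is invented): a built-in
 * caller restricting the function tracer before it is enabled.  The
 * buffers are parsed as globs by ftrace_match_records(), so they must be
 * writable.
 */
static void example_limit_tracing(void)
{
	unsigned char filter[] = "sys_*";
	unsigned char skip[] = "sys_close";

	/* reset old filters, then trace only functions matching sys_* */
	ftrace_set_filter(filter, sizeof(filter) - 1, 1);
	/* reset old notrace entries, then never trace sys_close */
	ftrace_set_notrace(skip, sizeof(skip) - 1, 1);
}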
1827 static int
1828 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1830 struct seq_file *m = (struct seq_file *)file->private_data;
1831 struct ftrace_iterator *iter;
1833 mutex_lock(&ftrace_regex_lock);
1834 if (file->f_mode & FMODE_READ) {
1835 iter = m->private;
1837 seq_release(inode, file);
1838 } else
1839 iter = file->private_data;
1841 if (iter->buffer_idx) {
1842 iter->filtered++;
1843 iter->buffer[iter->buffer_idx] = 0;
1844 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
1847 mutex_lock(&ftrace_lock);
1848 if (ftrace_start_up && ftrace_enabled)
1849 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1850 mutex_unlock(&ftrace_lock);
1852 kfree(iter);
1853 mutex_unlock(&ftrace_regex_lock);
1854 return 0;
1857 static int
1858 ftrace_filter_release(struct inode *inode, struct file *file)
1860 return ftrace_regex_release(inode, file, 1);
1863 static int
1864 ftrace_notrace_release(struct inode *inode, struct file *file)
1866 return ftrace_regex_release(inode, file, 0);
1869 static const struct file_operations ftrace_avail_fops = {
1870 .open = ftrace_avail_open,
1871 .read = seq_read,
1872 .llseek = seq_lseek,
1873 .release = ftrace_avail_release,
1876 static const struct file_operations ftrace_failures_fops = {
1877 .open = ftrace_failures_open,
1878 .read = seq_read,
1879 .llseek = seq_lseek,
1880 .release = ftrace_avail_release,
1883 static const struct file_operations ftrace_filter_fops = {
1884 .open = ftrace_filter_open,
1885 .read = ftrace_regex_read,
1886 .write = ftrace_filter_write,
1887 .llseek = ftrace_regex_lseek,
1888 .release = ftrace_filter_release,
1891 static const struct file_operations ftrace_notrace_fops = {
1892 .open = ftrace_notrace_open,
1893 .read = ftrace_regex_read,
1894 .write = ftrace_notrace_write,
1895 .llseek = ftrace_regex_lseek,
1896 .release = ftrace_notrace_release,
1899 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1901 static DEFINE_MUTEX(graph_lock);
1903 int ftrace_graph_count;
1904 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1906 static void *
1907 g_next(struct seq_file *m, void *v, loff_t *pos)
1909 unsigned long *array = m->private;
1910 int index = *pos;
1912 (*pos)++;
1914 if (index >= ftrace_graph_count)
1915 return NULL;
1917 return &array[index];
1920 static void *g_start(struct seq_file *m, loff_t *pos)
1922 void *p = NULL;
1924 mutex_lock(&graph_lock);
1926 /* Nothing, tell g_show to print all functions are enabled */
1927 if (!ftrace_graph_count && !*pos)
1928 return (void *)1;
1930 p = g_next(m, p, pos);
1932 return p;
1935 static void g_stop(struct seq_file *m, void *p)
1937 mutex_unlock(&graph_lock);
1940 static int g_show(struct seq_file *m, void *v)
1942 unsigned long *ptr = v;
1943 char str[KSYM_SYMBOL_LEN];
1945 if (!ptr)
1946 return 0;
1948 if (ptr == (unsigned long *)1) {
1949 seq_printf(m, "#### all functions enabled ####\n");
1950 return 0;
1953 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1955 seq_printf(m, "%s\n", str);
1957 return 0;
1960 static struct seq_operations ftrace_graph_seq_ops = {
1961 .start = g_start,
1962 .next = g_next,
1963 .stop = g_stop,
1964 .show = g_show,
1967 static int
1968 ftrace_graph_open(struct inode *inode, struct file *file)
1970 int ret = 0;
1972 if (unlikely(ftrace_disabled))
1973 return -ENODEV;
1975 mutex_lock(&graph_lock);
1976 if ((file->f_mode & FMODE_WRITE) &&
1977 !(file->f_flags & O_APPEND)) {
1978 ftrace_graph_count = 0;
1979 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1982 if (file->f_mode & FMODE_READ) {
1983 ret = seq_open(file, &ftrace_graph_seq_ops);
1984 if (!ret) {
1985 struct seq_file *m = file->private_data;
1986 m->private = ftrace_graph_funcs;
1988 } else
1989 file->private_data = ftrace_graph_funcs;
1990 mutex_unlock(&graph_lock);
1992 return ret;
1995 static ssize_t
1996 ftrace_graph_read(struct file *file, char __user *ubuf,
1997 size_t cnt, loff_t *ppos)
1999 if (file->f_mode & FMODE_READ)
2000 return seq_read(file, ubuf, cnt, ppos);
2001 else
2002 return -EPERM;
2005 static int
2006 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2008 struct dyn_ftrace *rec;
2009 struct ftrace_page *pg;
2010 int search_len;
2011 int found = 0;
2012 int type, not;
2013 char *search;
2014 bool exists;
2015 int i;
2017 if (ftrace_disabled)
2018 return -ENODEV;
2020 /* decode regex */
2021 type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
2022 if (not)
2023 return -EINVAL;
2025 search_len = strlen(search);
2027 mutex_lock(&ftrace_lock);
2028 do_for_each_ftrace_rec(pg, rec) {
2030 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2031 break;
2033 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2034 continue;
2036 if (ftrace_match_record(rec, search, search_len, type)) {
2037 /* ensure it is not already in the array */
2038 exists = false;
2039 for (i = 0; i < *idx; i++)
2040 if (array[i] == rec->ip) {
2041 exists = true;
2042 break;
2044 if (!exists) {
2045 array[(*idx)++] = rec->ip;
2046 found = 1;
2049 } while_for_each_ftrace_rec();
2051 mutex_unlock(&ftrace_lock);
2053 return found ? 0 : -EINVAL;
2056 static ssize_t
2057 ftrace_graph_write(struct file *file, const char __user *ubuf,
2058 size_t cnt, loff_t *ppos)
2060 unsigned char buffer[FTRACE_BUFF_MAX+1];
2061 unsigned long *array;
2062 size_t read = 0;
2063 ssize_t ret;
2064 int index = 0;
2065 char ch;
2067 if (!cnt || cnt < 0)
2068 return 0;
2070 mutex_lock(&graph_lock);
2072 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2073 ret = -EBUSY;
2074 goto out;
2077 if (file->f_mode & FMODE_READ) {
2078 struct seq_file *m = file->private_data;
2079 array = m->private;
2080 } else
2081 array = file->private_data;
2083 ret = get_user(ch, ubuf++);
2084 if (ret)
2085 goto out;
2086 read++;
2087 cnt--;
2089 /* skip white space */
2090 while (cnt && isspace(ch)) {
2091 ret = get_user(ch, ubuf++);
2092 if (ret)
2093 goto out;
2094 read++;
2095 cnt--;
2098 if (isspace(ch)) {
2099 *ppos += read;
2100 ret = read;
2101 goto out;
2104 while (cnt && !isspace(ch)) {
2105 if (index < FTRACE_BUFF_MAX)
2106 buffer[index++] = ch;
2107 else {
2108 ret = -EINVAL;
2109 goto out;
2111 ret = get_user(ch, ubuf++);
2112 if (ret)
2113 goto out;
2114 read++;
2115 cnt--;
2117 buffer[index] = 0;
2119 /* we allow only one expression at a time */
2120 ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
2121 if (ret)
2122 goto out;
2124 file->f_pos += read;
2126 ret = read;
2127 out:
2128 mutex_unlock(&graph_lock);
2130 return ret;
2133 static const struct file_operations ftrace_graph_fops = {
2134 .open = ftrace_graph_open,
2135 .read = ftrace_graph_read,
2136 .write = ftrace_graph_write,
2138 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2140 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2142 struct dentry *entry;
2144 entry = debugfs_create_file("available_filter_functions", 0444,
2145 d_tracer, NULL, &ftrace_avail_fops);
2146 if (!entry)
2147 pr_warning("Could not create debugfs "
2148 "'available_filter_functions' entry\n");
2150 entry = debugfs_create_file("failures", 0444,
2151 d_tracer, NULL, &ftrace_failures_fops);
2152 if (!entry)
2153 pr_warning("Could not create debugfs 'failures' entry\n");
2155 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
2156 NULL, &ftrace_filter_fops);
2157 if (!entry)
2158 pr_warning("Could not create debugfs "
2159 "'set_ftrace_filter' entry\n");
2161 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
2162 NULL, &ftrace_notrace_fops);
2163 if (!entry)
2164 pr_warning("Could not create debugfs "
2165 "'set_ftrace_notrace' entry\n");
2167 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2168 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
2169 NULL,
2170 &ftrace_graph_fops);
2171 if (!entry)
2172 pr_warning("Could not create debugfs "
2173 "'set_graph_function' entry\n");
2174 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2176 return 0;
2179 static int ftrace_convert_nops(struct module *mod,
2180 unsigned long *start,
2181 unsigned long *end)
2183 unsigned long *p;
2184 unsigned long addr;
2185 unsigned long flags;
2187 mutex_lock(&ftrace_lock);
2188 p = start;
2189 while (p < end) {
2190 addr = ftrace_call_adjust(*p++);
2192 * Some architecture linkers will pad between
2193 * the different mcount_loc sections of different
2194 * object files to satisfy alignments.
2195 * Skip any NULL pointers.
2197 if (!addr)
2198 continue;
2199 ftrace_record_ip(addr);
2202 /* disable interrupts to prevent kstop machine */
2203 local_irq_save(flags);
2204 ftrace_update_code(mod);
2205 local_irq_restore(flags);
2206 mutex_unlock(&ftrace_lock);
2208 return 0;
2211 void ftrace_init_module(struct module *mod,
2212 unsigned long *start, unsigned long *end)
2214 if (ftrace_disabled || start == end)
2215 return;
2216 ftrace_convert_nops(mod, start, end);
2219 extern unsigned long __start_mcount_loc[];
2220 extern unsigned long __stop_mcount_loc[];
2222 void __init ftrace_init(void)
2224 unsigned long count, addr, flags;
2225 int ret;
2227 /* Keep the ftrace pointer to the stub */
2228 addr = (unsigned long)ftrace_stub;
2230 local_irq_save(flags);
2231 ftrace_dyn_arch_init(&addr);
2232 local_irq_restore(flags);
2234 /* ftrace_dyn_arch_init places the return code in addr */
2235 if (addr)
2236 goto failed;
2238 count = __stop_mcount_loc - __start_mcount_loc;
2240 ret = ftrace_dyn_table_alloc(count);
2241 if (ret)
2242 goto failed;
2244 last_ftrace_enabled = ftrace_enabled = 1;
2246 ret = ftrace_convert_nops(NULL,
2247 __start_mcount_loc,
2248 __stop_mcount_loc);
2250 return;
2251 failed:
2252 ftrace_disabled = 1;
2255 #else
2257 static int __init ftrace_nodyn_init(void)
2259 ftrace_enabled = 1;
2260 return 0;
2262 device_initcall(ftrace_nodyn_init);
2264 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2265 static inline void ftrace_startup_enable(int command) { }
2266 /* Keep as macros so we do not need to define the commands */
2267 # define ftrace_startup(command) do { } while (0)
2268 # define ftrace_shutdown(command) do { } while (0)
2269 # define ftrace_startup_sysctl() do { } while (0)
2270 # define ftrace_shutdown_sysctl() do { } while (0)
2271 #endif /* CONFIG_DYNAMIC_FTRACE */
2273 static ssize_t
2274 ftrace_pid_read(struct file *file, char __user *ubuf,
2275 size_t cnt, loff_t *ppos)
2277 char buf[64];
2278 int r;
2280 if (ftrace_pid_trace == ftrace_swapper_pid)
2281 r = sprintf(buf, "swapper tasks\n");
2282 else if (ftrace_pid_trace)
2283 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
2284 else
2285 r = sprintf(buf, "no pid\n");
2287 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2290 static void clear_ftrace_swapper(void)
2292 struct task_struct *p;
2293 int cpu;
2295 get_online_cpus();
2296 for_each_online_cpu(cpu) {
2297 p = idle_task(cpu);
2298 clear_tsk_trace_trace(p);
2300 put_online_cpus();
2303 static void set_ftrace_swapper(void)
2305 struct task_struct *p;
2306 int cpu;
2308 get_online_cpus();
2309 for_each_online_cpu(cpu) {
2310 p = idle_task(cpu);
2311 set_tsk_trace_trace(p);
2313 put_online_cpus();
2316 static void clear_ftrace_pid(struct pid *pid)
2318 struct task_struct *p;
2320 rcu_read_lock();
2321 do_each_pid_task(pid, PIDTYPE_PID, p) {
2322 clear_tsk_trace_trace(p);
2323 } while_each_pid_task(pid, PIDTYPE_PID, p);
2324 rcu_read_unlock();
2326 put_pid(pid);
2329 static void set_ftrace_pid(struct pid *pid)
2331 struct task_struct *p;
2333 rcu_read_lock();
2334 do_each_pid_task(pid, PIDTYPE_PID, p) {
2335 set_tsk_trace_trace(p);
2336 } while_each_pid_task(pid, PIDTYPE_PID, p);
2337 rcu_read_unlock();
2340 static void clear_ftrace_pid_task(struct pid **pid)
2342 if (*pid == ftrace_swapper_pid)
2343 clear_ftrace_swapper();
2344 else
2345 clear_ftrace_pid(*pid);
2347 *pid = NULL;
2350 static void set_ftrace_pid_task(struct pid *pid)
2352 if (pid == ftrace_swapper_pid)
2353 set_ftrace_swapper();
2354 else
2355 set_ftrace_pid(pid);
2358 static ssize_t
2359 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2360 size_t cnt, loff_t *ppos)
2362 struct pid *pid;
2363 char buf[64];
2364 long val;
2365 int ret;
2367 if (cnt >= sizeof(buf))
2368 return -EINVAL;
2370 if (copy_from_user(&buf, ubuf, cnt))
2371 return -EFAULT;
2373 buf[cnt] = 0;
2375 ret = strict_strtol(buf, 10, &val);
2376 if (ret < 0)
2377 return ret;
2379 mutex_lock(&ftrace_lock);
2380 if (val < 0) {
2381 /* disable pid tracing */
2382 if (!ftrace_pid_trace)
2383 goto out;
2385 clear_ftrace_pid_task(&ftrace_pid_trace);
2387 } else {
2388 /* swapper task is special */
2389 if (!val) {
2390 pid = ftrace_swapper_pid;
2391 if (pid == ftrace_pid_trace)
2392 goto out;
2393 } else {
2394 pid = find_get_pid(val);
2396 if (pid == ftrace_pid_trace) {
2397 put_pid(pid);
2398 goto out;
2402 if (ftrace_pid_trace)
2403 clear_ftrace_pid_task(&ftrace_pid_trace);
2405 if (!pid)
2406 goto out;
2408 ftrace_pid_trace = pid;
2410 set_ftrace_pid_task(ftrace_pid_trace);
2413 /* update the function call */
2414 ftrace_update_pid_func();
2415 ftrace_startup_enable(0);
2417 out:
2418 mutex_unlock(&ftrace_lock);
2420 return cnt;
2423 static const struct file_operations ftrace_pid_fops = {
2424 .read = ftrace_pid_read,
2425 .write = ftrace_pid_write,
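/*
 * Usage note (added): the "set_ftrace_pid" file created in
 * ftrace_init_debugfs() below feeds ftrace_pid_write() above.  Writing a
 * pid limits function tracing to that task, writing 0 selects the
 * idle/swapper tasks, and writing a negative value turns pid filtering
 * off again.
 */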
2428 static __init int ftrace_init_debugfs(void)
2430 struct dentry *d_tracer;
2431 struct dentry *entry;
2433 d_tracer = tracing_init_dentry();
2434 if (!d_tracer)
2435 return 0;
2437 ftrace_init_dyn_debugfs(d_tracer);
2439 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2440 NULL, &ftrace_pid_fops);
2441 if (!entry)
2442 pr_warning("Could not create debugfs "
2443 "'set_ftrace_pid' entry\n");
2444 return 0;
2446 fs_initcall(ftrace_init_debugfs);
2449 * ftrace_kill - kill ftrace
2451 * This function should be used by panic code. It stops ftrace
2452 * but in a not so nice way. If you need to simply kill ftrace
2453 * from a non-atomic section, use ftrace_kill.
2455 void ftrace_kill(void)
2457 ftrace_disabled = 1;
2458 ftrace_enabled = 0;
2459 clear_ftrace_function();
2463 * register_ftrace_function - register a function for profiling
2464 * @ops - ops structure that holds the function for profiling.
2466 * Register a function to be called by all functions in the
2467 * kernel.
2469 * Note: @ops->func and all the functions it calls must be labeled
2470 * with "notrace", otherwise it will go into a
2471 * recursive loop.
2473 int register_ftrace_function(struct ftrace_ops *ops)
2475 int ret;
2477 if (unlikely(ftrace_disabled))
2478 return -1;
2480 mutex_lock(&ftrace_lock);
2482 ret = __register_ftrace_function(ops);
2483 ftrace_startup(0);
2485 mutex_unlock(&ftrace_lock);
2486 return ret;
2490 * unregister_ftrace_function - unregister a function for profiling.
2491 * @ops - ops structure that holds the function to unregister
2493 * Unregister a function that was added to be called by ftrace profiling.
2495 int unregister_ftrace_function(struct ftrace_ops *ops)
2497 int ret;
2499 mutex_lock(&ftrace_lock);
2500 ret = __unregister_ftrace_function(ops);
2501 ftrace_shutdown(0);
2502 mutex_unlock(&ftrace_lock);
2504 return ret;
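/*
 * Minimal usage sketch (my_trace_* is invented): the callback registered
 * this way runs for nearly every function call in the kernel, so it must
 * be notrace, as the kernel-doc above warns.
 */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip its call site */
}

static struct ftrace_ops my_trace_ops = {
	.func	= my_trace_func,
};

/*
 *	register_ftrace_function(&my_trace_ops);
 *	...
 *	unregister_ftrace_function(&my_trace_ops);
 */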
2508 ftrace_enable_sysctl(struct ctl_table *table, int write,
2509 struct file *file, void __user *buffer, size_t *lenp,
2510 loff_t *ppos)
2512 int ret;
2514 if (unlikely(ftrace_disabled))
2515 return -ENODEV;
2517 mutex_lock(&ftrace_lock);
2519 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
2521 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
2522 goto out;
2524 last_ftrace_enabled = ftrace_enabled;
2526 if (ftrace_enabled) {
2528 ftrace_startup_sysctl();
2530 /* we are starting ftrace again */
2531 if (ftrace_list != &ftrace_list_end) {
2532 if (ftrace_list->next == &ftrace_list_end)
2533 ftrace_trace_function = ftrace_list->func;
2534 else
2535 ftrace_trace_function = ftrace_list_func;
2538 } else {
2539 /* stopping ftrace calls (just send to ftrace_stub) */
2540 ftrace_trace_function = ftrace_stub;
2542 ftrace_shutdown_sysctl();
2545 out:
2546 mutex_unlock(&ftrace_lock);
2547 return ret;
2550 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2552 static atomic_t ftrace_graph_active;
2553 static struct notifier_block ftrace_suspend_notifier;
2555 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
2557 return 0;
2560 /* The callbacks that hook a function */
2561 trace_func_graph_ret_t ftrace_graph_return =
2562 (trace_func_graph_ret_t)ftrace_stub;
2563 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
2565 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
2566 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
2568 int i;
2569 int ret = 0;
2570 unsigned long flags;
2571 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
2572 struct task_struct *g, *t;
2574 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
2575 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
2576 * sizeof(struct ftrace_ret_stack),
2577 GFP_KERNEL);
2578 if (!ret_stack_list[i]) {
2579 start = 0;
2580 end = i;
2581 ret = -ENOMEM;
2582 goto free;
2586 read_lock_irqsave(&tasklist_lock, flags);
2587 do_each_thread(g, t) {
2588 if (start == end) {
2589 ret = -EAGAIN;
2590 goto unlock;
2593 if (t->ret_stack == NULL) {
2594 t->curr_ret_stack = -1;
2595 /* Make sure IRQs see the -1 first: */
2596 barrier();
2597 t->ret_stack = ret_stack_list[start++];
2598 atomic_set(&t->tracing_graph_pause, 0);
2599 atomic_set(&t->trace_overrun, 0);
2601 } while_each_thread(g, t);
2603 unlock:
2604 read_unlock_irqrestore(&tasklist_lock, flags);
2605 free:
2606 for (i = start; i < end; i++)
2607 kfree(ret_stack_list[i]);
2608 return ret;
2611 /* Allocate a return stack for each task */
2612 static int start_graph_tracing(void)
2614 struct ftrace_ret_stack **ret_stack_list;
2615 int ret, cpu;
2617 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2618 sizeof(struct ftrace_ret_stack *),
2619 GFP_KERNEL);
2621 if (!ret_stack_list)
2622 return -ENOMEM;
2624 /* The cpu_boot init_task->ret_stack will never be freed */
2625 for_each_online_cpu(cpu)
2626 ftrace_graph_init_task(idle_task(cpu));
2628 do {
2629 ret = alloc_retstack_tasklist(ret_stack_list);
2630 } while (ret == -EAGAIN);
2632 kfree(ret_stack_list);
2633 return ret;
2637 * Hibernation protection.
2638 * The state of the current task is far too unstable during
2639 * suspend/restore to disk. We want to protect against that.
2641 static int
2642 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2643 void *unused)
2645 switch (state) {
2646 case PM_HIBERNATION_PREPARE:
2647 pause_graph_tracing();
2648 break;
2650 case PM_POST_HIBERNATION:
2651 unpause_graph_tracing();
2652 break;
2654 return NOTIFY_DONE;
2657 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2658 trace_func_graph_ent_t entryfunc)
2660 int ret = 0;
2662 mutex_lock(&ftrace_lock);
2664 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2665 register_pm_notifier(&ftrace_suspend_notifier);
2667 atomic_inc(&ftrace_graph_active);
2668 ret = start_graph_tracing();
2669 if (ret) {
2670 atomic_dec(&ftrace_graph_active);
2671 goto out;
2674 ftrace_graph_return = retfunc;
2675 ftrace_graph_entry = entryfunc;
2677 ftrace_startup(FTRACE_START_FUNC_RET);
2679 out:
2680 mutex_unlock(&ftrace_lock);
2681 return ret;
2684 void unregister_ftrace_graph(void)
2686 mutex_lock(&ftrace_lock);
2688 atomic_dec(&ftrace_graph_active);
2689 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2690 ftrace_graph_entry = ftrace_graph_entry_stub;
2691 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2692 unregister_pm_notifier(&ftrace_suspend_notifier);
2694 mutex_unlock(&ftrace_lock);
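/*
 * Hedged sketch (my_graph_* is invented; the return-handler prototype is
 * assumed from trace_func_graph_ret_t, which only appears here in a cast):
 * the two callbacks hooked up by register_ftrace_graph().  The entry
 * handler returns nonzero to have the function's return traced as well,
 * while ftrace_graph_entry_stub() above declines by returning 0.
 */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* also hook this function's return */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* called when the traced function returns */
}

/*
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */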
2697 /* Allocate a return stack for newly created task */
2698 void ftrace_graph_init_task(struct task_struct *t)
2700 if (atomic_read(&ftrace_graph_active)) {
2701 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2702 * sizeof(struct ftrace_ret_stack),
2703 GFP_KERNEL);
2704 if (!t->ret_stack)
2705 return;
2706 t->curr_ret_stack = -1;
2707 atomic_set(&t->tracing_graph_pause, 0);
2708 atomic_set(&t->trace_overrun, 0);
2709 } else
2710 t->ret_stack = NULL;
2713 void ftrace_graph_exit_task(struct task_struct *t)
2715 struct ftrace_ret_stack *ret_stack = t->ret_stack;
2717 t->ret_stack = NULL;
2718 /* NULL must become visible to IRQs before we free it: */
2719 barrier();
2721 kfree(ret_stack);
2724 void ftrace_graph_stop(void)
2726 ftrace_stop();
2728 #endif