/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <trace/events/sched.h>

#include <asm/ftrace.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}

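/*
 * Usage sketch (hypothetical, added for illustration; the my_* names
 * are not part of ftrace): a tracer hooks the function tracer through
 * the public API from <linux/ftrace.h>, which ends up in
 * __register_ftrace_function() above while holding ftrace_lock.
 */
#if 0	/* example only */
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* called on every traced function entry */
}

static struct ftrace_ops my_trace_ops __read_mostly =
{
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_trace_ops);
}
#endif
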
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

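/*
 * Worked example (illustrative; the sizes assume a 4K page and a
 * 64-bit build with the graph tracer enabled, which this file does not
 * state): sizeof(struct ftrace_profile) is about 40 bytes and the page
 * header takes 16, so PROFILES_PER_PAGE comes to roughly
 * (4096 - 16) / 40, i.e. about 102 records per page.
 */
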
static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg\n"
		   "  --------                               "
		   "---    ----            ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		   "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static DEFINE_MUTEX(mutex);
	static struct trace_seq s;
	unsigned long long avg;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	mutex_lock(&mutex);
	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_print_seq(m, &s);
	mutex_unlock(&mutex);
#endif
	seq_putc(m, '\n');

	return 0;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 0; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	/* the loop above already freed the first page (start == pages) */
	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec)
		rec->time += calltime;

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}

#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func = function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];		/* big enough to hold a number */
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

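/*
 * Usage sketch (hypothetical, added for illustration): walking all
 * records with the iterator pair above. ftrace_lock must be held, and
 * per the comment above, leaving early requires a goto; a plain
 * 'continue' is fine because it only advances the inner loop.
 */
#if 0	/* example only */
static int count_enabled_records(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FREE)
			continue;
		if (rec->flags & FTRACE_FL_ENABLED)
			count++;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);

	return count;
}
#endif
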
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long ip, fl;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have
		 * failed and not converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/* convert record (i.e., patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
	FTRACE_ITER_HASH	= (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			hidx;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_HASH))
		*pos = 0;

	iter->flags |= FTRACE_ITER_HASH;

	iter->hidx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_hash_next(m, p, &l);
		if (!p)
			break;
	}
	return p;
}

static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;
	char str[KSYM_SYMBOL_LEN];

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
	seq_printf(m, "%s", str);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p && iter->flags & FTRACE_ITER_FILTER)
		return t_hash_start(m, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}

static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	char *ptr;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}

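/*
 * Usage sketch (hypothetical buffer, added for illustration): how the
 * two helpers above cooperate. "!sched_*" strips the '!' (not = 1),
 * sees the trailing '*' and becomes MATCH_FRONT_ONLY with search
 * pointing at "sched_", which then prefix-matches "sched_clock".
 */
#if 0	/* example only */
static void glob_example(void)
{
	char buf[] = "!sched_*";	/* ftrace_setup_glob modifies it */
	char *search;
	int not, type;

	type = ftrace_setup_glob(buf, strlen(buf), &search, &not);
	WARN_ON(!ftrace_match("sched_clock", search, strlen(search), type));
}
#endif
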
static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

static void ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = ftrace_setup_glob(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	ftrace_match_module_records(func, mod, enable);
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}

device_initcall(ftrace_mod_cmd_init);

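/*
 * Usage sketch (hypothetical, added for illustration; "dump" and the
 * my_* names are invented): following the template above, after
 * registering this command, writing "schedule:dump" to
 * set_ftrace_filter would invoke my_dump_callback() with
 * func == "schedule" and cmd == "dump".
 */
#if 0	/* example only */
static int
my_dump_callback(char *func, char *cmd, char *param, int enable)
{
	/* param carries anything after a second ':' */
	return 0;
}

static struct ftrace_func_command my_dump_cmd = {
	.name			= "dump",
	.func			= my_dump_callback,
};

static int __init my_dump_cmd_init(void)
{
	return register_ftrace_command(&my_dump_cmd);
}
device_initcall(my_dump_cmd_init);
#endif
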
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(void)
{
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_probe_ops);
	ftrace_startup(0);
	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_probe_ops);
	ftrace_shutdown(0);
	ftrace_probe_registered = 0;
}

static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}

int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}

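/*
 * Usage sketch (hypothetical, added for illustration; the my_* names
 * are invented): attaching a probe to every function matching a glob.
 * The callback then runs from function_trace_probe_call() above on
 * each hit of a matched function.
 */
#if 0	/* example only */
static void
my_probe_func(unsigned long ip, unsigned long parent_ip, void **data)
{
	/* per-hit work goes here */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func			= my_probe_func,
};

static int __init my_probe_init(void)
{
	static char glob[] = "sched_*";	/* must be writable */

	/* returns the number of functions hooked, or -errno */
	return register_ftrace_function_probe(glob, &my_probe_ops, NULL);
}
#endif
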
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else {
		int not;

		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ftrace_match_records(func, len, enable);
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/*
	 * If the parser hasn't finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!(iter->flags & FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ret = ftrace_process_regex(iter->buffer,
					   iter->buffer_idx, enable);
		if (ret)
			goto out;
		iter->buffer_idx = 0;
	} else {
		iter->flags |= FTRACE_ITER_CONT;
		iter->buffer[iter->buffer_idx++] = ch;
	}

	*ppos += read;
	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

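/*
 * Usage sketch (hypothetical, added for illustration): trace only the
 * sched_* functions but never sched_clock(). The buffers must be
 * writable because the glob parser edits them in place.
 */
#if 0	/* example only */
static void __init my_filter_setup(void)
{
	static char filter[] = "sched_*";
	static char skip[] = "sched_clock";

	ftrace_set_filter(filter, strlen(filter), 1);	/* reset, then add */
	ftrace_set_notrace(skip, strlen(skip), 0);	/* keep old notraces */
}
#endif
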
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

static int __init set_ftrace_notrace(char *str)
{
	strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);

static void __init set_ftrace_early_filter(char *buf, int enable)
{
	char *func;

	while (buf) {
		func = strsep(&buf, ",");
		ftrace_set_regex(func, strlen(func), 0, enable);
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		set_ftrace_early_filter(ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		set_ftrace_early_filter(ftrace_notrace_buf, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

2503 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2505 static DEFINE_MUTEX(graph_lock);
2507 int ftrace_graph_count;
2508 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
static void *
__g_next(struct seq_file *m, loff_t *pos)
{
        unsigned long *array = m->private;

        if (*pos >= ftrace_graph_count)
                return NULL;
        return &array[*pos];
}

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __g_next(m, pos);
}
static void *g_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&graph_lock);

        /* Nothing is set; tell g_show to report that all functions are enabled */
        if (!ftrace_graph_count && !*pos)
                return (void *)1;

        return __g_next(m, pos);
}

static void g_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&graph_lock);
}
static int g_show(struct seq_file *m, void *v)
{
        unsigned long *ptr = v;
        char str[KSYM_SYMBOL_LEN];

        if (!ptr)
                return 0;

        if (ptr == (unsigned long *)1) {
                seq_printf(m, "#### all functions enabled ####\n");
                return 0;
        }

        kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}
static const struct seq_operations ftrace_graph_seq_ops = {
        .start = g_start,
        .next = g_next,
        .stop = g_stop,
        .show = g_show,
};
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&graph_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC)) {
                ftrace_graph_count = 0;
                memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
        }

        if (file->f_mode & FMODE_READ) {
                ret = seq_open(file, &ftrace_graph_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = ftrace_graph_funcs;
                }
        } else
                file->private_data = ftrace_graph_funcs;
        mutex_unlock(&graph_lock);

        return ret;
}
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
        if (file->f_mode & FMODE_READ)
                seq_release(inode, file);
        return 0;
}
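/*
 * Match @buffer (a glob expression, decoded by ftrace_setup_glob())
 * against every known mcount record and append the addresses of the
 * matches to @array, skipping duplicates.  Negated ('!') expressions
 * are rejected.  Returns 0 if at least one function matched, -EINVAL
 * otherwise.
 */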
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int search_len;
        int found = 0;
        int type, not;
        char *search;
        bool exists;
        int i;

        if (ftrace_disabled)
                return -ENODEV;

        /* decode regex */
        type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
        if (not)
                return -EINVAL;

        search_len = strlen(search);

        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {

                if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
                        break;

                if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
                        continue;

                if (ftrace_match_record(rec, search, search_len, type)) {
                        /* ensure it is not already in the array */
                        exists = false;
                        for (i = 0; i < *idx; i++)
                                if (array[i] == rec->ip) {
                                        exists = true;
                                        break;
                                }
                        if (!exists) {
                                array[(*idx)++] = rec->ip;
                                found = 1;
                        }
                }
        } while_for_each_ftrace_rec();

        mutex_unlock(&ftrace_lock);

        return found ? 0 : -EINVAL;
}
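/*
 * Handle a write to the set_graph_function file: skip leading white
 * space, copy a single token of at most FTRACE_BUFF_MAX bytes in from
 * user space, and hand it to ftrace_set_func().  Only one expression
 * is accepted per write.
 */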
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        unsigned char buffer[FTRACE_BUFF_MAX+1];
        unsigned long *array;
        size_t read = 0;
        ssize_t ret;
        int index = 0;
        char ch;

        /* cnt is unsigned, so a "cnt < 0" test would be dead code */
        if (!cnt)
                return 0;

        mutex_lock(&graph_lock);

        if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
                ret = -EBUSY;
                goto out;
        }

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                array = m->private;
        } else
                array = file->private_data;

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        /* skip white space */
        while (cnt && isspace(ch)) {
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                *ppos += read;
                ret = read;
                goto out;
        }

        while (cnt && !isspace(ch)) {
                if (index < FTRACE_BUFF_MAX)
                        buffer[index++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }
        buffer[index] = 0;

        /* we allow only one expression at a time */
        ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
        if (ret)
                goto out;

        file->f_pos += read;

        ret = read;
out:
        mutex_unlock(&graph_lock);

        return ret;
}
static const struct file_operations ftrace_graph_fops = {
        .open = ftrace_graph_open,
        .read = seq_read,
        .write = ftrace_graph_write,
        .release = ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{

        trace_create_file("available_filter_functions", 0444,
                        d_tracer, NULL, &ftrace_avail_fops);

        trace_create_file("failures", 0444,
                        d_tracer, NULL, &ftrace_failures_fops);

        trace_create_file("set_ftrace_filter", 0644, d_tracer,
                        NULL, &ftrace_filter_fops);

        trace_create_file("set_ftrace_notrace", 0644, d_tracer,
                        NULL, &ftrace_notrace_fops);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* 0644, not 0444: this file has a write handler */
        trace_create_file("set_graph_function", 0644, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

        return 0;
}
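/*
 * Walk the [start, end) table of mcount call site addresses (the core
 * kernel's __mcount_loc section, or a module's ftrace_callsites),
 * record every non-NULL entry, and then patch the recorded sites via
 * ftrace_update_code().
 */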
static int ftrace_convert_nops(struct module *mod,
                               unsigned long *start,
                               unsigned long *end)
{
        unsigned long *p;
        unsigned long addr;
        unsigned long flags;

        mutex_lock(&ftrace_lock);
        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
                /*
                 * Some architecture linkers will pad between
                 * the different mcount_loc sections of different
                 * object files to satisfy alignments.
                 * Skip any NULL pointers.
                 */
                if (!addr)
                        continue;
                ftrace_record_ip(addr);
        }

        /* disable interrupts to prevent kstop machine */
        local_irq_save(flags);
        ftrace_update_code(mod);
        local_irq_restore(flags);
        mutex_unlock(&ftrace_lock);

        return 0;
}
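/*
 * Module support: a module's mcount call sites are picked up from its
 * ftrace_callsites table as it loads, and their records are freed
 * again when the module is removed.
 */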
#ifdef CONFIG_MODULES
void ftrace_release_mod(struct module *mod)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (ftrace_disabled)
                return;

        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {
                if (within_module_core(rec->ip, mod)) {
                        /*
                         * rec->ip is changed in ftrace_free_rec();
                         * it should no longer lie within the module
                         * if the record was already freed.
                         */
                        FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
                        ftrace_free_rec(rec);
                }
        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);
}
static void ftrace_init_module(struct module *mod,
                               unsigned long *start, unsigned long *end)
{
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(mod, start, end);
}
static int ftrace_module_notify(struct notifier_block *self,
                                unsigned long val, void *data)
{
        struct module *mod = data;

        switch (val) {
        case MODULE_STATE_COMING:
                ftrace_init_module(mod, mod->ftrace_callsites,
                                   mod->ftrace_callsites +
                                   mod->num_ftrace_callsites);
                break;
        case MODULE_STATE_GOING:
                ftrace_release_mod(mod);
                break;
        }

        return 0;
}
#else
static int ftrace_module_notify(struct notifier_block *self,
                                unsigned long val, void *data)
{
        return 0;
}
#endif /* CONFIG_MODULES */
struct notifier_block ftrace_module_nb = {
        .notifier_call = ftrace_module_notify,
        .priority = 0,
};
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
        unsigned long count, addr, flags;
        int ret;

        /* Keep the ftrace pointer to the stub */
        addr = (unsigned long)ftrace_stub;

        local_irq_save(flags);
        ftrace_dyn_arch_init(&addr);
        local_irq_restore(flags);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                goto failed;

        count = __stop_mcount_loc - __start_mcount_loc;

        ret = ftrace_dyn_table_alloc(count);
        if (ret)
                goto failed;

        last_ftrace_enabled = ftrace_enabled = 1;

        ret = ftrace_convert_nops(NULL,
                                  __start_mcount_loc,
                                  __stop_mcount_loc);

        ret = register_module_notifier(&ftrace_module_nb);
        if (ret)
                pr_warning("Failed to register ftrace module notifier\n");

        set_ftrace_early_filters();

        return;
failed:
        ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
        ftrace_enabled = 1;
        return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep these as macros so we do not need to define the commands */
# define ftrace_startup(command)        do { } while (0)
# define ftrace_shutdown(command)       do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
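/*
 * Pid filtering: the set_ftrace_pid file created below restricts
 * function tracing to the tasks whose per-task trace flag is set.
 * The helpers that follow set or clear that flag on the tasks of a
 * given pid, or on every cpu's idle (swapper) task.
 */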
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        int r;

        if (ftrace_pid_trace == ftrace_swapper_pid)
                r = sprintf(buf, "swapper tasks\n");
        else if (ftrace_pid_trace)
                r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
        else
                r = sprintf(buf, "no pid\n");

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static void clear_ftrace_swapper(void)
{
        struct task_struct *p;
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                p = idle_task(cpu);
                clear_tsk_trace_trace(p);
        }
        put_online_cpus();
}

static void set_ftrace_swapper(void)
{
        struct task_struct *p;
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                p = idle_task(cpu);
                set_tsk_trace_trace(p);
        }
        put_online_cpus();
}
static void clear_ftrace_pid(struct pid *pid)
{
        struct task_struct *p;

        rcu_read_lock();
        do_each_pid_task(pid, PIDTYPE_PID, p) {
                clear_tsk_trace_trace(p);
        } while_each_pid_task(pid, PIDTYPE_PID, p);
        rcu_read_unlock();

        put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
        struct task_struct *p;

        rcu_read_lock();
        do_each_pid_task(pid, PIDTYPE_PID, p) {
                set_tsk_trace_trace(p);
        } while_each_pid_task(pid, PIDTYPE_PID, p);
        rcu_read_unlock();
}
static void clear_ftrace_pid_task(struct pid **pid)
{
        if (*pid == ftrace_swapper_pid)
                clear_ftrace_swapper();
        else
                clear_ftrace_pid(*pid);

        *pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
        if (pid == ftrace_swapper_pid)
                set_ftrace_swapper();
        else
                set_ftrace_pid(pid);
}
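/*
 * Parse a decimal pid written to set_ftrace_pid and adjust what gets
 * traced:
 *
 *      echo <pid>      - trace only the task with that pid
 *      echo 0          - trace the per-cpu idle (swapper) tasks
 *      echo -1         - any negative value disables pid filtering
 *
 * e.g., assuming debugfs is mounted at /sys/kernel/debug:
 *
 *      echo 123 > /sys/kernel/debug/tracing/set_ftrace_pid
 */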
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
                 size_t cnt, loff_t *ppos)
{
        struct pid *pid;
        char buf[64];
        long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        mutex_lock(&ftrace_lock);
        if (val < 0) {
                /* disable pid tracing */
                if (!ftrace_pid_trace)
                        goto out;

                clear_ftrace_pid_task(&ftrace_pid_trace);

        } else {
                /* swapper task is special */
                if (!val) {
                        pid = ftrace_swapper_pid;
                        if (pid == ftrace_pid_trace)
                                goto out;
                } else {
                        pid = find_get_pid(val);

                        if (pid == ftrace_pid_trace) {
                                put_pid(pid);
                                goto out;
                        }
                }

                if (ftrace_pid_trace)
                        clear_ftrace_pid_task(&ftrace_pid_trace);

                if (!pid)
                        goto out;

                ftrace_pid_trace = pid;

                set_ftrace_pid_task(ftrace_pid_trace);
        }

        /* update the function call */
        ftrace_update_pid_func();
        ftrace_startup_enable(0);

out:
        mutex_unlock(&ftrace_lock);

        return cnt;
}
static const struct file_operations ftrace_pid_fops = {
        .read = ftrace_pid_read,
        .write = ftrace_pid_write,
};
static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        ftrace_init_dyn_debugfs(d_tracer);

        trace_create_file("set_ftrace_pid", 0644, d_tracer,
                            NULL, &ftrace_pid_fops);

        ftrace_profile_debugfs(d_tracer);

        return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it simply flips the disable flags and
 * clears the trace function, without taking any locks.
 */
void ftrace_kill(void)
{
        ftrace_disabled = 1;
        ftrace_enabled = 0;
        clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_lock);

        ret = __register_ftrace_function(ops);
        ftrace_startup(0);

        mutex_unlock(&ftrace_lock);
        return ret;
}
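/*
 * A minimal usage sketch (illustrative only, not part of this file);
 * the names my_trace_func/my_ops are hypothetical.  As the kernel-doc
 * above warns, the callback must be notrace:
 *
 *      static void notrace my_trace_func(unsigned long ip,
 *                                        unsigned long parent_ip)
 *      {
 *              // called on entry of every traced function
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_trace_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */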
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown(0);
        mutex_unlock(&ftrace_lock);

        return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
                goto out;

        last_ftrace_enabled = !!ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

out:
        mutex_unlock(&ftrace_lock);
        return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static int ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
        return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
                        (trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
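/*
 * Return stacks are handed out in batches of FTRACE_RETSTACK_ALLOC_SIZE:
 * alloc_retstack_tasklist() gives one to each task that lacks one and
 * returns -EAGAIN when the batch is exhausted, so start_graph_tracing()
 * simply calls it again until every task is covered.
 */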
/* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
        int i;
        int ret = 0;
        unsigned long flags;
        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
        struct task_struct *g, *t;

        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
                ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
                                        * sizeof(struct ftrace_ret_stack),
                                        GFP_KERNEL);
                if (!ret_stack_list[i]) {
                        start = 0;
                        end = i;
                        ret = -ENOMEM;
                        goto free;
                }
        }

        read_lock_irqsave(&tasklist_lock, flags);
        do_each_thread(g, t) {
                if (start == end) {
                        ret = -EAGAIN;
                        goto unlock;
                }

                if (t->ret_stack == NULL) {
                        atomic_set(&t->tracing_graph_pause, 0);
                        atomic_set(&t->trace_overrun, 0);
                        t->curr_ret_stack = -1;
                        /* Make sure the tasks see the -1 first: */
                        smp_wmb();
                        t->ret_stack = ret_stack_list[start++];
                }
        } while_each_thread(g, t);

unlock:
        read_unlock_irqrestore(&tasklist_lock, flags);
free:
        for (i = start; i < end; i++)
                kfree(ret_stack_list[i]);
        return ret;
}
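/*
 * sched_switch probe: unless the user asked to account sleep time
 * (TRACE_ITER_SLEEP_TIME), bump the recorded call times of the task
 * being switched in by the interval it spent off the cpu, so graph
 * durations reflect execution time only.
 */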
static void
ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                                struct task_struct *next)
{
        unsigned long long timestamp;
        int index;

        /*
         * Does the user want to count the time a function was asleep?
         * If so, do not update the time stamps.
         */
        if (trace_flags & TRACE_ITER_SLEEP_TIME)
                return;

        timestamp = trace_clock_local();

        prev->ftrace_timestamp = timestamp;

        /* only process tasks that we timestamped */
        if (!next->ftrace_timestamp)
                return;

        /*
         * Update all the counters in next to make up for the
         * time next was sleeping.
         */
        timestamp -= next->ftrace_timestamp;

        for (index = next->curr_ret_stack; index >= 0; index--)
                next->ret_stack[index].calltime += timestamp;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
        struct ftrace_ret_stack **ret_stack_list;
        int ret, cpu;

        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
                                sizeof(struct ftrace_ret_stack *),
                                GFP_KERNEL);

        if (!ret_stack_list)
                return -ENOMEM;

        /* The cpu_boot init_task->ret_stack will never be freed */
        for_each_online_cpu(cpu) {
                if (!idle_task(cpu)->ret_stack)
                        ftrace_graph_init_task(idle_task(cpu));
        }

        do {
                ret = alloc_retstack_tasklist(ret_stack_list);
        } while (ret == -EAGAIN);

        if (!ret) {
                ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
                if (ret)
                        pr_info("ftrace_graph: Couldn't activate tracepoint"
                                " probe to kernel_sched_switch\n");
        }

        kfree(ret_stack_list);
        return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
                                                        void *unused)
{
        switch (state) {
        case PM_HIBERNATION_PREPARE:
                pause_graph_tracing();
                break;

        case PM_POST_HIBERNATION:
                unpause_graph_tracing();
                break;
        }
        return NOTIFY_DONE;
}
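/*
 * register_ftrace_graph - attach the graph tracer's entry/return hooks
 * @retfunc: callback run when a traced function returns
 * @entryfunc: callback run when a traced function is entered
 *
 * Only one graph tracer can be registered at a time.  A minimal
 * illustrative sketch (the names my_entry/my_return are hypothetical,
 * and the callback types are assumed to come from linux/ftrace.h):
 *
 *      static int my_entry(struct ftrace_graph_ent *trace)
 *      {
 *              return 1;
 *      }
 *
 *      static void my_return(struct ftrace_graph_ret *trace)
 *      {
 *      }
 *
 *      register_ftrace_graph(my_return, my_entry);
 */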
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                        trace_func_graph_ent_t entryfunc)
{
        int ret = 0;

        mutex_lock(&ftrace_lock);

        /* we currently allow only one tracer registered at a time */
        if (ftrace_graph_active) {
                ret = -EBUSY;
                goto out;
        }

        ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
        register_pm_notifier(&ftrace_suspend_notifier);

        ftrace_graph_active++;
        ret = start_graph_tracing();
        if (ret) {
                ftrace_graph_active--;
                goto out;
        }

        ftrace_graph_return = retfunc;
        ftrace_graph_entry = entryfunc;

        ftrace_startup(FTRACE_START_FUNC_RET);

out:
        mutex_unlock(&ftrace_lock);
        return ret;
}
void unregister_ftrace_graph(void)
{
        mutex_lock(&ftrace_lock);

        if (unlikely(!ftrace_graph_active))
                goto out;

        ftrace_graph_active--;
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);

out:
        mutex_unlock(&ftrace_lock);
}
/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
        /* Make sure we do not use the parent ret_stack */
        t->ret_stack = NULL;

        if (ftrace_graph_active) {
                struct ftrace_ret_stack *ret_stack;

                ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                * sizeof(struct ftrace_ret_stack),
                                GFP_KERNEL);
                if (!ret_stack)
                        return;
                t->curr_ret_stack = -1;
                atomic_set(&t->tracing_graph_pause, 0);
                atomic_set(&t->trace_overrun, 0);
                t->ftrace_timestamp = 0;
                /* make curr_ret_stack visible before we add the ret_stack */
                smp_wmb();
                t->ret_stack = ret_stack;
        }
}
void ftrace_graph_exit_task(struct task_struct *t)
{
        struct ftrace_ret_stack *ret_stack = t->ret_stack;

        t->ret_stack = NULL;
        /* NULL must become visible to IRQs before we free it: */
        barrier();

        kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
        ftrace_stop();
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */