ftrace: Add ops parameter to ftrace_startup/shutdown functions
kernel/trace/ftrace.c
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
33 #include <trace/events/sched.h>
35 #include <asm/ftrace.h>
36 #include <asm/setup.h>
38 #include "trace_output.h"
39 #include "trace_stat.h"
41 #define FTRACE_WARN_ON(cond) \
42 ({ \
43 int ___r = cond; \
44 if (WARN_ON(___r)) \
45 ftrace_kill(); \
46 ___r; \
49 #define FTRACE_WARN_ON_ONCE(cond) \
50 ({ \
51 int ___r = cond; \
52 if (WARN_ON_ONCE(___r)) \
53 ftrace_kill(); \
54 ___r; \
57 /* hash bits for specific function selection */
58 #define FTRACE_HASH_BITS 7
59 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
60 #define FTRACE_HASH_DEFAULT_BITS 10
61 #define FTRACE_HASH_MAX_BITS 12
63 /* ftrace_enabled is a method to turn ftrace on or off */
64 int ftrace_enabled __read_mostly;
65 static int last_ftrace_enabled;
67 /* Quick disabling of function tracer. */
68 int function_trace_stop;
70 /* List for set_ftrace_pid's pids. */
71 LIST_HEAD(ftrace_pids);
72 struct ftrace_pid {
73 struct list_head list;
74 struct pid *pid;
78 * ftrace_disabled is set when an anomaly is discovered.
79 * ftrace_disabled is much stronger than ftrace_enabled.
81 static int ftrace_disabled __read_mostly;
83 static DEFINE_MUTEX(ftrace_lock);
85 static struct ftrace_ops ftrace_list_end __read_mostly =
87 .func = ftrace_stub,
90 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
91 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
92 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
93 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
96 * Traverse the ftrace_list, invoking all entries. The reason that we
97 * can use rcu_dereference_raw() is that elements removed from this list
98 * are simply leaked, so there is no need to interact with a grace-period
99 * mechanism. The rcu_dereference_raw() calls are needed to handle
100 * concurrent insertions into the ftrace_list.
102 * Silly Alpha and silly pointer-speculation compiler optimizations!
104 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
106 struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
108 while (op != &ftrace_list_end) {
109 op->func(ip, parent_ip);
110 op = rcu_dereference_raw(op->next); /*see above*/
114 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
116 if (!test_tsk_trace_trace(current))
117 return;
119 ftrace_pid_function(ip, parent_ip);
122 static void set_ftrace_pid_function(ftrace_func_t func)
124 /* do not set ftrace_pid_function to itself! */
125 if (func != ftrace_pid_func)
126 ftrace_pid_function = func;
130 * clear_ftrace_function - reset the ftrace function
132 * This NULLs the ftrace function and in essence stops
133 * tracing. There may be a lag before the update takes effect.
135 void clear_ftrace_function(void)
137 ftrace_trace_function = ftrace_stub;
138 __ftrace_trace_function = ftrace_stub;
139 ftrace_pid_function = ftrace_stub;
142 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
144 * For those archs that do not test ftrace_trace_stop in their
145 * mcount call site, we need to do it from C.
147 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
149 if (function_trace_stop)
150 return;
152 __ftrace_trace_function(ip, parent_ip);
154 #endif
156 static void update_ftrace_function(void)
158 ftrace_func_t func;
161 * If there's only one function registered, then call that
162 * function directly. Otherwise, we need to iterate over the
163 * registered callers.
165 if (ftrace_list == &ftrace_list_end ||
166 ftrace_list->next == &ftrace_list_end)
167 func = ftrace_list->func;
168 else
169 func = ftrace_list_func;
171 /* If we filter on pids, update to use the pid function */
172 if (!list_empty(&ftrace_pids)) {
173 set_ftrace_pid_function(func);
174 func = ftrace_pid_func;
176 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
177 ftrace_trace_function = func;
178 #else
179 __ftrace_trace_function = func;
180 ftrace_trace_function = ftrace_test_stop_func;
181 #endif
184 static int __register_ftrace_function(struct ftrace_ops *ops)
186 ops->next = ftrace_list;
188 * We are entering ops into the ftrace_list but another
189 * CPU might be walking that list. We need to make sure
190 * the ops->next pointer is valid before another CPU sees
191 * the ops pointer inserted into the ftrace_list.
193 rcu_assign_pointer(ftrace_list, ops);
195 if (ftrace_enabled)
196 update_ftrace_function();
198 return 0;
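/*
 * Illustrative sketch (not part of this file): callers normally go
 * through the public register_ftrace_function() wrapper (used by the
 * profiler below), passing an ftrace_ops whose ->func has the
 * (ip, parent_ip) signature used throughout this file:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip)
 *	{
 *		...
 *	}
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *	};
 *	register_ftrace_function(&my_ops);
 */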
201 static int __unregister_ftrace_function(struct ftrace_ops *ops)
203 struct ftrace_ops **p;
206 * If we are removing the last function, then simply point
207 * to the ftrace_stub.
209 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
210 ftrace_trace_function = ftrace_stub;
211 ftrace_list = &ftrace_list_end;
212 return 0;
215 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
216 if (*p == ops)
217 break;
219 if (*p != ops)
220 return -1;
222 *p = (*p)->next;
224 if (ftrace_enabled)
225 update_ftrace_function();
227 return 0;
230 static void ftrace_update_pid_func(void)
232 /* Only do something if we are tracing something */
233 if (ftrace_trace_function == ftrace_stub)
234 return;
236 update_ftrace_function();
239 #ifdef CONFIG_FUNCTION_PROFILER
240 struct ftrace_profile {
241 struct hlist_node node;
242 unsigned long ip;
243 unsigned long counter;
244 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
245 unsigned long long time;
246 unsigned long long time_squared;
247 #endif
250 struct ftrace_profile_page {
251 struct ftrace_profile_page *next;
252 unsigned long index;
253 struct ftrace_profile records[];
256 struct ftrace_profile_stat {
257 atomic_t disabled;
258 struct hlist_head *hash;
259 struct ftrace_profile_page *pages;
260 struct ftrace_profile_page *start;
261 struct tracer_stat stat;
264 #define PROFILE_RECORDS_SIZE \
265 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
267 #define PROFILES_PER_PAGE \
268 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
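/*
 * Worked example (illustrative; exact sizes vary by arch/config):
 * with 4K pages and a 48-byte struct ftrace_profile (64-bit build
 * with the graph tracer on), PROFILE_RECORDS_SIZE is about 4080
 * bytes, so PROFILES_PER_PAGE comes to roughly 85 records per page.
 */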
270 static int ftrace_profile_bits __read_mostly;
271 static int ftrace_profile_enabled __read_mostly;
273 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
274 static DEFINE_MUTEX(ftrace_profile_lock);
276 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
278 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
280 static void *
281 function_stat_next(void *v, int idx)
283 struct ftrace_profile *rec = v;
284 struct ftrace_profile_page *pg;
286 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
288 again:
289 if (idx != 0)
290 rec++;
292 if ((void *)rec >= (void *)&pg->records[pg->index]) {
293 pg = pg->next;
294 if (!pg)
295 return NULL;
296 rec = &pg->records[0];
297 if (!rec->counter)
298 goto again;
301 return rec;
304 static void *function_stat_start(struct tracer_stat *trace)
306 struct ftrace_profile_stat *stat =
307 container_of(trace, struct ftrace_profile_stat, stat);
309 if (!stat || !stat->start)
310 return NULL;
312 return function_stat_next(&stat->start->records[0], 0);
315 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
316 /* function graph compares on total time */
317 static int function_stat_cmp(void *p1, void *p2)
319 struct ftrace_profile *a = p1;
320 struct ftrace_profile *b = p2;
322 if (a->time < b->time)
323 return -1;
324 if (a->time > b->time)
325 return 1;
326 else
327 return 0;
329 #else
330 /* not function graph compares against hits */
331 static int function_stat_cmp(void *p1, void *p2)
333 struct ftrace_profile *a = p1;
334 struct ftrace_profile *b = p2;
336 if (a->counter < b->counter)
337 return -1;
338 if (a->counter > b->counter)
339 return 1;
340 else
341 return 0;
343 #endif
345 static int function_stat_headers(struct seq_file *m)
347 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
348 seq_printf(m, " Function "
349 "Hit Time Avg s^2\n"
350 " -------- "
351 "--- ---- --- ---\n");
352 #else
353 seq_printf(m, " Function Hit\n"
354 " -------- ---\n");
355 #endif
356 return 0;
359 static int function_stat_show(struct seq_file *m, void *v)
361 struct ftrace_profile *rec = v;
362 char str[KSYM_SYMBOL_LEN];
363 int ret = 0;
364 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
365 static struct trace_seq s;
366 unsigned long long avg;
367 unsigned long long stddev;
368 #endif
369 mutex_lock(&ftrace_profile_lock);
371 /* we raced with function_profile_reset() */
372 if (unlikely(rec->counter == 0)) {
373 ret = -EBUSY;
374 goto out;
377 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
378 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
380 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
381 seq_printf(m, " ");
382 avg = rec->time;
383 do_div(avg, rec->counter);
385 /* Sample variance (s^2) */
386 if (rec->counter <= 1)
387 stddev = 0;
388 else {
389 stddev = rec->time_squared - rec->counter * avg * avg;
391 * Divide by only 1000 for the ns^2 -> us^2 conversion;
392 * trace_print_graph_duration will divide by 1000 again.
394 do_div(stddev, (rec->counter - 1) * 1000);
397 trace_seq_init(&s);
398 trace_print_graph_duration(rec->time, &s);
399 trace_seq_puts(&s, " ");
400 trace_print_graph_duration(avg, &s);
401 trace_seq_puts(&s, " ");
402 trace_print_graph_duration(stddev, &s);
403 trace_print_seq(m, &s);
404 #endif
405 seq_putc(m, '\n');
406 out:
407 mutex_unlock(&ftrace_profile_lock);
409 return ret;
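/*
 * For illustration only, one line of the resulting stat file looks
 * roughly like (spacing and units come from the code above):
 *
 *	schedule   26842   142634.1 us   5.3 us   1.2 us
 *
 * i.e. function name, hit count, and, with the graph tracer, total
 * time, average and s^2.
 */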
412 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
414 struct ftrace_profile_page *pg;
416 pg = stat->pages = stat->start;
418 while (pg) {
419 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
420 pg->index = 0;
421 pg = pg->next;
424 memset(stat->hash, 0,
425 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
428 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
430 struct ftrace_profile_page *pg;
431 int functions;
432 int pages;
433 int i;
435 /* If we already allocated, do nothing */
436 if (stat->pages)
437 return 0;
439 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
440 if (!stat->pages)
441 return -ENOMEM;
443 #ifdef CONFIG_DYNAMIC_FTRACE
444 functions = ftrace_update_tot_cnt;
445 #else
447 * We do not know how many functions exist because dynamic
448 * tracing is what counts them. From past experience there are
449 * around 20K functions; that should be more than enough, since
450 * it is highly unlikely we will execute every function in
451 * the kernel.
453 functions = 20000;
454 #endif
456 pg = stat->start = stat->pages;
458 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
460 for (i = 0; i < pages; i++) {
461 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
462 if (!pg->next)
463 goto out_free;
464 pg = pg->next;
467 return 0;
469 out_free:
470 pg = stat->start;
471 while (pg) {
472 unsigned long tmp = (unsigned long)pg;
474 pg = pg->next;
475 free_page(tmp);
478 free_page((unsigned long)stat->pages);
479 stat->pages = NULL;
480 stat->start = NULL;
482 return -ENOMEM;
485 static int ftrace_profile_init_cpu(int cpu)
487 struct ftrace_profile_stat *stat;
488 int size;
490 stat = &per_cpu(ftrace_profile_stats, cpu);
492 if (stat->hash) {
493 /* If the profile is already created, simply reset it */
494 ftrace_profile_reset(stat);
495 return 0;
499 * We are profiling all functions, but usually only a few thousand
500 * functions are hit. We'll make a hash of 1024 items.
502 size = FTRACE_PROFILE_HASH_SIZE;
504 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
506 if (!stat->hash)
507 return -ENOMEM;
509 if (!ftrace_profile_bits) {
510 size--;
512 for (; size; size >>= 1)
513 ftrace_profile_bits++;
516 /* Preallocate the function profiling pages */
517 if (ftrace_profile_pages_init(stat) < 0) {
518 kfree(stat->hash);
519 stat->hash = NULL;
520 return -ENOMEM;
523 return 0;
526 static int ftrace_profile_init(void)
528 int cpu;
529 int ret = 0;
531 for_each_online_cpu(cpu) {
532 ret = ftrace_profile_init_cpu(cpu);
533 if (ret)
534 break;
537 return ret;
540 /* interrupts must be disabled */
541 static struct ftrace_profile *
542 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
544 struct ftrace_profile *rec;
545 struct hlist_head *hhd;
546 struct hlist_node *n;
547 unsigned long key;
549 key = hash_long(ip, ftrace_profile_bits);
550 hhd = &stat->hash[key];
552 if (hlist_empty(hhd))
553 return NULL;
555 hlist_for_each_entry_rcu(rec, n, hhd, node) {
556 if (rec->ip == ip)
557 return rec;
560 return NULL;
563 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
564 struct ftrace_profile *rec)
566 unsigned long key;
568 key = hash_long(rec->ip, ftrace_profile_bits);
569 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
573 * The memory is already allocated; this simply finds a new record to use.
575 static struct ftrace_profile *
576 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
578 struct ftrace_profile *rec = NULL;
580 /* prevent recursion (from NMIs) */
581 if (atomic_inc_return(&stat->disabled) != 1)
582 goto out;
585 * Try to find the function again since an NMI
586 * could have added it
588 rec = ftrace_find_profiled_func(stat, ip);
589 if (rec)
590 goto out;
592 if (stat->pages->index == PROFILES_PER_PAGE) {
593 if (!stat->pages->next)
594 goto out;
595 stat->pages = stat->pages->next;
598 rec = &stat->pages->records[stat->pages->index++];
599 rec->ip = ip;
600 ftrace_add_profile(stat, rec);
602 out:
603 atomic_dec(&stat->disabled);
605 return rec;
608 static void
609 function_profile_call(unsigned long ip, unsigned long parent_ip)
611 struct ftrace_profile_stat *stat;
612 struct ftrace_profile *rec;
613 unsigned long flags;
615 if (!ftrace_profile_enabled)
616 return;
618 local_irq_save(flags);
620 stat = &__get_cpu_var(ftrace_profile_stats);
621 if (!stat->hash || !ftrace_profile_enabled)
622 goto out;
624 rec = ftrace_find_profiled_func(stat, ip);
625 if (!rec) {
626 rec = ftrace_profile_alloc(stat, ip);
627 if (!rec)
628 goto out;
631 rec->counter++;
632 out:
633 local_irq_restore(flags);
636 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
637 static int profile_graph_entry(struct ftrace_graph_ent *trace)
639 function_profile_call(trace->func, 0);
640 return 1;
643 static void profile_graph_return(struct ftrace_graph_ret *trace)
645 struct ftrace_profile_stat *stat;
646 unsigned long long calltime;
647 struct ftrace_profile *rec;
648 unsigned long flags;
650 local_irq_save(flags);
651 stat = &__get_cpu_var(ftrace_profile_stats);
652 if (!stat->hash || !ftrace_profile_enabled)
653 goto out;
655 /* If the calltime was zero'd ignore it */
656 if (!trace->calltime)
657 goto out;
659 calltime = trace->rettime - trace->calltime;
661 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
662 int index;
664 index = trace->depth;
666 /* Append this call time to the parent time to subtract */
667 if (index)
668 current->ret_stack[index - 1].subtime += calltime;
670 if (current->ret_stack[index].subtime < calltime)
671 calltime -= current->ret_stack[index].subtime;
672 else
673 calltime = 0;
676 rec = ftrace_find_profiled_func(stat, trace->func);
677 if (rec) {
678 rec->time += calltime;
679 rec->time_squared += calltime * calltime;
682 out:
683 local_irq_restore(flags);
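/*
 * Worked example of the subtime handling above (made-up numbers):
 * if rettime - calltime is 10000 ns for this function but its
 * children accumulated 7000 ns of subtime, then with GRAPH_TIME off
 * only 3000 ns are charged to this record, while the full 10000 ns
 * are appended to the parent's subtime so the parent is not
 * double-charged either.
 */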
686 static int register_ftrace_profiler(void)
688 return register_ftrace_graph(&profile_graph_return,
689 &profile_graph_entry);
692 static void unregister_ftrace_profiler(void)
694 unregister_ftrace_graph();
696 #else
697 static struct ftrace_ops ftrace_profile_ops __read_mostly =
699 .func = function_profile_call,
702 static int register_ftrace_profiler(void)
704 return register_ftrace_function(&ftrace_profile_ops);
707 static void unregister_ftrace_profiler(void)
709 unregister_ftrace_function(&ftrace_profile_ops);
711 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
713 static ssize_t
714 ftrace_profile_write(struct file *filp, const char __user *ubuf,
715 size_t cnt, loff_t *ppos)
717 unsigned long val;
718 char buf[64]; /* big enough to hold a number */
719 int ret;
721 if (cnt >= sizeof(buf))
722 return -EINVAL;
724 if (copy_from_user(&buf, ubuf, cnt))
725 return -EFAULT;
727 buf[cnt] = 0;
729 ret = strict_strtoul(buf, 10, &val);
730 if (ret < 0)
731 return ret;
733 val = !!val;
735 mutex_lock(&ftrace_profile_lock);
736 if (ftrace_profile_enabled ^ val) {
737 if (val) {
738 ret = ftrace_profile_init();
739 if (ret < 0) {
740 cnt = ret;
741 goto out;
744 ret = register_ftrace_profiler();
745 if (ret < 0) {
746 cnt = ret;
747 goto out;
749 ftrace_profile_enabled = 1;
750 } else {
751 ftrace_profile_enabled = 0;
753 * unregister_ftrace_profiler calls stop_machine
754 * so this acts like a synchronize_sched.
756 unregister_ftrace_profiler();
759 out:
760 mutex_unlock(&ftrace_profile_lock);
762 *ppos += cnt;
764 return cnt;
767 static ssize_t
768 ftrace_profile_read(struct file *filp, char __user *ubuf,
769 size_t cnt, loff_t *ppos)
771 char buf[64]; /* big enough to hold a number */
772 int r;
774 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
775 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
778 static const struct file_operations ftrace_profile_fops = {
779 .open = tracing_open_generic,
780 .read = ftrace_profile_read,
781 .write = ftrace_profile_write,
782 .llseek = default_llseek,
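/*
 * Typical usage from userspace (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * The per-cpu "function<N>" stat files are registered below by
 * ftrace_profile_debugfs().
 */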
785 /* used to initialize the real stat files */
786 static struct tracer_stat function_stats __initdata = {
787 .name = "functions",
788 .stat_start = function_stat_start,
789 .stat_next = function_stat_next,
790 .stat_cmp = function_stat_cmp,
791 .stat_headers = function_stat_headers,
792 .stat_show = function_stat_show
795 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
797 struct ftrace_profile_stat *stat;
798 struct dentry *entry;
799 char *name;
800 int ret;
801 int cpu;
803 for_each_possible_cpu(cpu) {
804 stat = &per_cpu(ftrace_profile_stats, cpu);
806 /* allocate enough for function name + cpu number */
807 name = kmalloc(32, GFP_KERNEL);
808 if (!name) {
810 * The files created are permanent; if something goes wrong,
811 * we still do not free the memory.
813 WARN(1,
814 "Could not allocate stat file for cpu %d\n",
815 cpu);
816 return;
818 stat->stat = function_stats;
819 snprintf(name, 32, "function%d", cpu);
820 stat->stat.name = name;
821 ret = register_stat_tracer(&stat->stat);
822 if (ret) {
823 WARN(1,
824 "Could not register function stat for cpu %d\n",
825 cpu);
826 kfree(name);
827 return;
831 entry = debugfs_create_file("function_profile_enabled", 0644,
832 d_tracer, NULL, &ftrace_profile_fops);
833 if (!entry)
834 pr_warning("Could not create debugfs "
835 "'function_profile_enabled' entry\n");
838 #else /* CONFIG_FUNCTION_PROFILER */
839 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
842 #endif /* CONFIG_FUNCTION_PROFILER */
844 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
846 #ifdef CONFIG_DYNAMIC_FTRACE
848 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
849 # error Dynamic ftrace depends on MCOUNT_RECORD
850 #endif
852 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
854 struct ftrace_func_probe {
855 struct hlist_node node;
856 struct ftrace_probe_ops *ops;
857 unsigned long flags;
858 unsigned long ip;
859 void *data;
860 struct rcu_head rcu;
863 enum {
864 FTRACE_ENABLE_CALLS = (1 << 0),
865 FTRACE_DISABLE_CALLS = (1 << 1),
866 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
867 FTRACE_START_FUNC_RET = (1 << 3),
868 FTRACE_STOP_FUNC_RET = (1 << 4),
870 struct ftrace_func_entry {
871 struct hlist_node hlist;
872 unsigned long ip;
875 struct ftrace_hash {
876 unsigned long size_bits;
877 struct hlist_head *buckets;
878 unsigned long count;
882 * We make these constant because no one should touch them,
883 * but they are used as the default "empty hash", to avoid allocating
884 * it all the time. These are in a read only section such that if
885 * anyone does try to modify it, it will cause an exception.
887 static const struct hlist_head empty_buckets[1];
888 static const struct ftrace_hash empty_hash = {
889 .buckets = (struct hlist_head *)empty_buckets,
891 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
893 enum {
894 FTRACE_OPS_FL_ENABLED = 1,
897 struct ftrace_ops global_ops = {
898 .func = ftrace_stub,
899 .notrace_hash = EMPTY_HASH,
900 .filter_hash = EMPTY_HASH,
903 static struct dyn_ftrace *ftrace_new_addrs;
905 static DEFINE_MUTEX(ftrace_regex_lock);
907 struct ftrace_page {
908 struct ftrace_page *next;
909 int index;
910 struct dyn_ftrace records[];
913 #define ENTRIES_PER_PAGE \
914 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
916 /* estimate from running different kernels */
917 #define NR_TO_INIT 10000
919 static struct ftrace_page *ftrace_pages_start;
920 static struct ftrace_page *ftrace_pages;
922 static struct dyn_ftrace *ftrace_free_records;
924 static struct ftrace_func_entry *
925 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
927 unsigned long key;
928 struct ftrace_func_entry *entry;
929 struct hlist_head *hhd;
930 struct hlist_node *n;
932 if (!hash->count)
933 return NULL;
935 if (hash->size_bits > 0)
936 key = hash_long(ip, hash->size_bits);
937 else
938 key = 0;
940 hhd = &hash->buckets[key];
942 hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
943 if (entry->ip == ip)
944 return entry;
946 return NULL;
949 static void __add_hash_entry(struct ftrace_hash *hash,
950 struct ftrace_func_entry *entry)
952 struct hlist_head *hhd;
953 unsigned long key;
955 if (hash->size_bits)
956 key = hash_long(entry->ip, hash->size_bits);
957 else
958 key = 0;
960 hhd = &hash->buckets[key];
961 hlist_add_head(&entry->hlist, hhd);
962 hash->count++;
965 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
967 struct ftrace_func_entry *entry;
969 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
970 if (!entry)
971 return -ENOMEM;
973 entry->ip = ip;
974 __add_hash_entry(hash, entry);
976 return 0;
979 static void
980 free_hash_entry(struct ftrace_hash *hash,
981 struct ftrace_func_entry *entry)
983 hlist_del(&entry->hlist);
984 kfree(entry);
985 hash->count--;
988 static void
989 remove_hash_entry(struct ftrace_hash *hash,
990 struct ftrace_func_entry *entry)
992 hlist_del(&entry->hlist);
993 hash->count--;
996 static void ftrace_hash_clear(struct ftrace_hash *hash)
998 struct hlist_head *hhd;
999 struct hlist_node *tp, *tn;
1000 struct ftrace_func_entry *entry;
1001 int size = 1 << hash->size_bits;
1002 int i;
1004 if (!hash->count)
1005 return;
1007 for (i = 0; i < size; i++) {
1008 hhd = &hash->buckets[i];
1009 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1010 free_hash_entry(hash, entry);
1012 FTRACE_WARN_ON(hash->count);
1015 static void free_ftrace_hash(struct ftrace_hash *hash)
1017 if (!hash || hash == EMPTY_HASH)
1018 return;
1019 ftrace_hash_clear(hash);
1020 kfree(hash->buckets);
1021 kfree(hash);
1024 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1026 struct ftrace_hash *hash;
1027 int size;
1029 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1030 if (!hash)
1031 return NULL;
1033 size = 1 << size_bits;
1034 hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1036 if (!hash->buckets) {
1037 kfree(hash);
1038 return NULL;
1041 hash->size_bits = size_bits;
1043 return hash;
1046 static struct ftrace_hash *
1047 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1049 struct ftrace_func_entry *entry;
1050 struct ftrace_hash *new_hash;
1051 struct hlist_node *tp;
1052 int size;
1053 int ret;
1054 int i;
1056 new_hash = alloc_ftrace_hash(size_bits);
1057 if (!new_hash)
1058 return NULL;
1060 /* Empty hash? */
1061 if (!hash || !hash->count)
1062 return new_hash;
1064 size = 1 << hash->size_bits;
1065 for (i = 0; i < size; i++) {
1066 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1067 ret = add_hash_entry(new_hash, entry->ip);
1068 if (ret < 0)
1069 goto free_hash;
1073 FTRACE_WARN_ON(new_hash->count != hash->count);
1075 return new_hash;
1077 free_hash:
1078 free_ftrace_hash(new_hash);
1079 return NULL;
1082 static int
1083 ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
1085 struct ftrace_func_entry *entry;
1086 struct hlist_node *tp, *tn;
1087 struct hlist_head *hhd;
1088 struct ftrace_hash *hash = *dst;
1089 unsigned long key;
1090 int size = src->count;
1091 int bits = 0;
1092 int i;
1095 * If the new source is empty, just free dst and assign it
1096 * the empty_hash.
1098 if (!src->count) {
1099 free_ftrace_hash(*dst);
1100 *dst = EMPTY_HASH;
1101 return 0;
1104 ftrace_hash_clear(hash);
1107 * Make the hash size about 1/2 the # found
1109 for (size /= 2; size; size >>= 1)
1110 bits++;
1112 /* Don't allocate too much */
1113 if (bits > FTRACE_HASH_MAX_BITS)
1114 bits = FTRACE_HASH_MAX_BITS;
1116 /* We can't modify the empty_hash */
1117 if (hash == EMPTY_HASH) {
1118 /* Create a new hash */
1119 *dst = alloc_ftrace_hash(bits);
1120 if (!*dst) {
1121 *dst = EMPTY_HASH;
1122 return -ENOMEM;
1124 hash = *dst;
1125 } else {
1126 size = 1 << bits;
1128 /* Use the old hash, but create new buckets */
1129 hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL);
1130 if (!hhd)
1131 return -ENOMEM;
1133 kfree(hash->buckets);
1134 hash->buckets = hhd;
1135 hash->size_bits = bits;
1138 size = 1 << src->size_bits;
1139 for (i = 0; i < size; i++) {
1140 hhd = &src->buckets[i];
1141 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1142 if (bits > 0)
1143 key = hash_long(entry->ip, bits);
1144 else
1145 key = 0;
1146 remove_hash_entry(src, entry);
1147 __add_hash_entry(hash, entry);
1151 return 0;
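/*
 * Sizing example for the loop above: with src->count == 24, size is
 * halved to 12 and then shifted 12 -> 6 -> 3 -> 1, giving bits == 4,
 * i.e. 16 buckets for 24 entries (capped at FTRACE_HASH_MAX_BITS).
 */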
1155 * This is a double for loop. Do not use 'break' to break out of it;
1156 * you must use a goto.
1158 #define do_for_each_ftrace_rec(pg, rec) \
1159 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1160 int _____i; \
1161 for (_____i = 0; _____i < pg->index; _____i++) { \
1162 rec = &pg->records[_____i];
1164 #define while_for_each_ftrace_rec() \
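/*
 * Usage pattern, as seen throughout this file:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;
 *		...
 *	} while_for_each_ftrace_rec();
 */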
1168 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1169 int filter_hash,
1170 bool inc)
1172 struct ftrace_hash *hash;
1173 struct ftrace_hash *other_hash;
1174 struct ftrace_page *pg;
1175 struct dyn_ftrace *rec;
1176 int count = 0;
1177 int all = 0;
1179 /* Only update if the ops has been registered */
1180 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1181 return;
1184 * In the filter_hash case:
1185 * If the count is zero, we update all records.
1186 * Otherwise we just update the items in the hash.
1188 * In the notrace_hash case:
1189 * We enable the update in the hash.
1190 * As disabling notrace means enabling the tracing,
1191 * and enabling notrace means disabling, the inc variable
1192 * gets inverted.
1194 if (filter_hash) {
1195 hash = ops->filter_hash;
1196 other_hash = ops->notrace_hash;
1197 if (!hash->count)
1198 all = 1;
1199 } else {
1200 inc = !inc;
1201 hash = ops->notrace_hash;
1202 other_hash = ops->filter_hash;
1204 * If the notrace hash has no items,
1205 * then there's nothing to do.
1207 if (!hash->count)
1208 return;
1211 do_for_each_ftrace_rec(pg, rec) {
1212 int in_other_hash = 0;
1213 int in_hash = 0;
1214 int match = 0;
1216 if (all) {
1218 * Only the filter_hash affects all records.
1219 * Update if the record is not in the notrace hash.
1221 if (!ftrace_lookup_ip(other_hash, rec->ip))
1222 match = 1;
1223 } else {
1224 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1225 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1230 if (filter_hash && in_hash && !in_other_hash)
1231 match = 1;
1232 else if (!filter_hash && in_hash &&
1233 (in_other_hash || !other_hash->count))
1234 match = 1;
1236 if (!match)
1237 continue;
1239 if (inc) {
1240 rec->flags++;
1241 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1242 return;
1243 } else {
1244 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1245 return;
1246 rec->flags--;
1248 count++;
1249 /* Shortcut: if we handled all records, we are done. */
1250 if (!all && count == hash->count)
1251 return;
1252 } while_for_each_ftrace_rec();
1255 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1256 int filter_hash)
1258 __ftrace_hash_rec_update(ops, filter_hash, 0);
1261 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1262 int filter_hash)
1264 __ftrace_hash_rec_update(ops, filter_hash, 1);
1267 static void ftrace_free_rec(struct dyn_ftrace *rec)
1269 rec->freelist = ftrace_free_records;
1270 ftrace_free_records = rec;
1271 rec->flags |= FTRACE_FL_FREE;
1274 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1276 struct dyn_ftrace *rec;
1278 /* First check for freed records */
1279 if (ftrace_free_records) {
1280 rec = ftrace_free_records;
1282 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1283 FTRACE_WARN_ON_ONCE(1);
1284 ftrace_free_records = NULL;
1285 return NULL;
1288 ftrace_free_records = rec->freelist;
1289 memset(rec, 0, sizeof(*rec));
1290 return rec;
1293 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1294 if (!ftrace_pages->next) {
1295 /* allocate another page */
1296 ftrace_pages->next =
1297 (void *)get_zeroed_page(GFP_KERNEL);
1298 if (!ftrace_pages->next)
1299 return NULL;
1301 ftrace_pages = ftrace_pages->next;
1304 return &ftrace_pages->records[ftrace_pages->index++];
1307 static struct dyn_ftrace *
1308 ftrace_record_ip(unsigned long ip)
1310 struct dyn_ftrace *rec;
1312 if (ftrace_disabled)
1313 return NULL;
1315 rec = ftrace_alloc_dyn_node(ip);
1316 if (!rec)
1317 return NULL;
1319 rec->ip = ip;
1320 rec->newlist = ftrace_new_addrs;
1321 ftrace_new_addrs = rec;
1323 return rec;
1326 static void print_ip_ins(const char *fmt, unsigned char *p)
1328 int i;
1330 printk(KERN_CONT "%s", fmt);
1332 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1333 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1336 static void ftrace_bug(int failed, unsigned long ip)
1338 switch (failed) {
1339 case -EFAULT:
1340 FTRACE_WARN_ON_ONCE(1);
1341 pr_info("ftrace faulted on modifying ");
1342 print_ip_sym(ip);
1343 break;
1344 case -EINVAL:
1345 FTRACE_WARN_ON_ONCE(1);
1346 pr_info("ftrace failed to modify ");
1347 print_ip_sym(ip);
1348 print_ip_ins(" actual: ", (unsigned char *)ip);
1349 printk(KERN_CONT "\n");
1350 break;
1351 case -EPERM:
1352 FTRACE_WARN_ON_ONCE(1);
1353 pr_info("ftrace faulted on writing ");
1354 print_ip_sym(ip);
1355 break;
1356 default:
1357 FTRACE_WARN_ON_ONCE(1);
1358 pr_info("ftrace faulted on unknown error ");
1359 print_ip_sym(ip);
1364 /* Return 1 if the address range is reserved for ftrace */
1365 int ftrace_text_reserved(void *start, void *end)
1367 struct dyn_ftrace *rec;
1368 struct ftrace_page *pg;
1370 do_for_each_ftrace_rec(pg, rec) {
1371 if (rec->ip <= (unsigned long)end &&
1372 rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1373 return 1;
1374 } while_for_each_ftrace_rec();
1375 return 0;
1379 static int
1380 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1382 unsigned long ftrace_addr;
1383 unsigned long flag = 0UL;
1385 ftrace_addr = (unsigned long)FTRACE_ADDR;
1388 * If we are enabling tracing:
1390 * If the record has a ref count, then we need to enable it
1391 * because someone is using it.
1393 * Otherwise we make sure its disabled.
1395 * If we are disabling tracing, then disable all records that
1396 * are enabled.
1398 if (enable && (rec->flags & ~FTRACE_FL_MASK))
1399 flag = FTRACE_FL_ENABLED;
1401 /* If the state of this record hasn't changed, then do nothing */
1402 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1403 return 0;
1405 if (flag) {
1406 rec->flags |= FTRACE_FL_ENABLED;
1407 return ftrace_make_call(rec, ftrace_addr);
1410 rec->flags &= ~FTRACE_FL_ENABLED;
1411 return ftrace_make_nop(NULL, rec, ftrace_addr);
1414 static void ftrace_replace_code(int enable)
1416 struct dyn_ftrace *rec;
1417 struct ftrace_page *pg;
1418 int failed;
1420 if (unlikely(ftrace_disabled))
1421 return;
1423 do_for_each_ftrace_rec(pg, rec) {
1424 /* Skip over free records */
1425 if (rec->flags & FTRACE_FL_FREE)
1426 continue;
1428 failed = __ftrace_replace_code(rec, enable);
1429 if (failed) {
1430 ftrace_bug(failed, rec->ip);
1431 /* Stop processing */
1432 return;
1434 } while_for_each_ftrace_rec();
1437 static int
1438 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1440 unsigned long ip;
1441 int ret;
1443 ip = rec->ip;
1445 if (unlikely(ftrace_disabled))
1446 return 0;
1448 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1449 if (ret) {
1450 ftrace_bug(ret, ip);
1451 return 0;
1453 return 1;
1457 * archs can override this function if they must do something
1458 * before the modifying code is performed.
1460 int __weak ftrace_arch_code_modify_prepare(void)
1462 return 0;
1466 * archs can override this function if they must do something
1467 * after the modifying code is performed.
1469 int __weak ftrace_arch_code_modify_post_process(void)
1471 return 0;
1474 static int __ftrace_modify_code(void *data)
1476 int *command = data;
1478 if (*command & FTRACE_ENABLE_CALLS)
1479 ftrace_replace_code(1);
1480 else if (*command & FTRACE_DISABLE_CALLS)
1481 ftrace_replace_code(0);
1483 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1484 ftrace_update_ftrace_func(ftrace_trace_function);
1486 if (*command & FTRACE_START_FUNC_RET)
1487 ftrace_enable_ftrace_graph_caller();
1488 else if (*command & FTRACE_STOP_FUNC_RET)
1489 ftrace_disable_ftrace_graph_caller();
1491 return 0;
1494 static void ftrace_run_update_code(int command)
1496 int ret;
1498 ret = ftrace_arch_code_modify_prepare();
1499 FTRACE_WARN_ON(ret);
1500 if (ret)
1501 return;
1503 stop_machine(__ftrace_modify_code, &command, NULL);
1505 ret = ftrace_arch_code_modify_post_process();
1506 FTRACE_WARN_ON(ret);
1509 static ftrace_func_t saved_ftrace_func;
1510 static int ftrace_start_up;
1512 static void ftrace_startup_enable(int command)
1514 if (saved_ftrace_func != ftrace_trace_function) {
1515 saved_ftrace_func = ftrace_trace_function;
1516 command |= FTRACE_UPDATE_TRACE_FUNC;
1519 if (!command || !ftrace_enabled)
1520 return;
1522 ftrace_run_update_code(command);
1525 static void ftrace_startup(struct ftrace_ops *ops, int command)
1527 if (unlikely(ftrace_disabled))
1528 return;
1530 ftrace_start_up++;
1531 command |= FTRACE_ENABLE_CALLS;
1533 ops->flags |= FTRACE_OPS_FL_ENABLED;
1534 if (ftrace_start_up == 1)
1535 ftrace_hash_rec_enable(ops, 1);
1537 ftrace_startup_enable(command);
1540 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1542 if (unlikely(ftrace_disabled))
1543 return;
1545 ftrace_start_up--;
1547 * Just warn in case of imbalance; no need to kill ftrace. It's not
1548 * critical, but the ftrace_call callers may never be nopped again after
1549 * further ftrace uses.
1551 WARN_ON_ONCE(ftrace_start_up < 0);
1553 if (!ftrace_start_up)
1554 ftrace_hash_rec_disable(ops, 1);
1556 if (!ftrace_start_up) {
1557 command |= FTRACE_DISABLE_CALLS;
1558 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1561 if (saved_ftrace_func != ftrace_trace_function) {
1562 saved_ftrace_func = ftrace_trace_function;
1563 command |= FTRACE_UPDATE_TRACE_FUNC;
1566 if (!command || !ftrace_enabled)
1567 return;
1569 ftrace_run_update_code(command);
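/*
 * Note on the new ops parameter (the subject of this change):
 * callers now hand ftrace_startup()/ftrace_shutdown() the ftrace_ops
 * being enabled or disabled, so ftrace_hash_rec_enable()/disable()
 * can account records against that ops' filter hashes, e.g.:
 *
 *	ftrace_startup(&global_ops, 0);
 *	...
 *	ftrace_shutdown(&global_ops, 0);
 *
 * as the function probe code below does.
 */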
1572 static void ftrace_startup_sysctl(void)
1574 if (unlikely(ftrace_disabled))
1575 return;
1577 /* Force update next time */
1578 saved_ftrace_func = NULL;
1579 /* ftrace_start_up is true if we want ftrace running */
1580 if (ftrace_start_up)
1581 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1584 static void ftrace_shutdown_sysctl(void)
1586 if (unlikely(ftrace_disabled))
1587 return;
1589 /* ftrace_start_up is true if ftrace is running */
1590 if (ftrace_start_up)
1591 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1594 static cycle_t ftrace_update_time;
1595 static unsigned long ftrace_update_cnt;
1596 unsigned long ftrace_update_tot_cnt;
1598 static int ftrace_update_code(struct module *mod)
1600 struct dyn_ftrace *p;
1601 cycle_t start, stop;
1603 start = ftrace_now(raw_smp_processor_id());
1604 ftrace_update_cnt = 0;
1606 while (ftrace_new_addrs) {
1608 /* If something went wrong, bail without enabling anything */
1609 if (unlikely(ftrace_disabled))
1610 return -1;
1612 p = ftrace_new_addrs;
1613 ftrace_new_addrs = p->newlist;
1614 p->flags = 0L;
1617 * Do the initial record conversion from mcount jump
1618 * to the NOP instructions.
1620 if (!ftrace_code_disable(mod, p)) {
1621 ftrace_free_rec(p);
1622 /* Game over */
1623 break;
1626 ftrace_update_cnt++;
1629 * If the tracing is enabled, go ahead and enable the record.
1631 * The reason not to enable the record immediately is the
1632 * inherent check of ftrace_make_nop/ftrace_make_call for
1633 * correct previous instructions. Doing the NOP conversion
1634 * first puts the module into the correct state, thus
1635 * passing the ftrace_make_call check.
1637 if (ftrace_start_up) {
1638 int failed = __ftrace_replace_code(p, 1);
1639 if (failed) {
1640 ftrace_bug(failed, p->ip);
1641 ftrace_free_rec(p);
1646 stop = ftrace_now(raw_smp_processor_id());
1647 ftrace_update_time = stop - start;
1648 ftrace_update_tot_cnt += ftrace_update_cnt;
1650 return 0;
1653 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1655 struct ftrace_page *pg;
1656 int cnt;
1657 int i;
1659 /* allocate a few pages */
1660 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1661 if (!ftrace_pages_start)
1662 return -1;
1665 * Allocate a few more pages.
1667 * TODO: have some parser search vmlinux before
1668 * final linking to find all calls to ftrace.
1669 * Then we can:
1670 * a) know how many pages to allocate.
1671 * and/or
1672 * b) set up the table then.
1674 * The dynamic code is still necessary for
1675 * modules.
1678 pg = ftrace_pages = ftrace_pages_start;
1680 cnt = num_to_init / ENTRIES_PER_PAGE;
1681 pr_info("ftrace: allocating %ld entries in %d pages\n",
1682 num_to_init, cnt + 1);
1684 for (i = 0; i < cnt; i++) {
1685 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1687 /* If we fail, we'll try later anyway */
1688 if (!pg->next)
1689 break;
1691 pg = pg->next;
1694 return 0;
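/*
 * Rough arithmetic (illustrative): if struct dyn_ftrace were 32
 * bytes on a 4K-page 64-bit build, ENTRIES_PER_PAGE would be about
 * (4096 - 16) / 32 = 127, so the NR_TO_INIT estimate of 10000
 * entries costs on the order of 80 pages here.
 */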
1697 enum {
1698 FTRACE_ITER_FILTER = (1 << 0),
1699 FTRACE_ITER_NOTRACE = (1 << 1),
1700 FTRACE_ITER_PRINTALL = (1 << 2),
1701 FTRACE_ITER_HASH = (1 << 3),
1702 FTRACE_ITER_ENABLED = (1 << 4),
1705 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1707 struct ftrace_iterator {
1708 loff_t pos;
1709 loff_t func_pos;
1710 struct ftrace_page *pg;
1711 struct dyn_ftrace *func;
1712 struct ftrace_func_probe *probe;
1713 struct trace_parser parser;
1714 struct ftrace_hash *hash;
1715 struct ftrace_ops *ops;
1716 int hidx;
1717 int idx;
1718 unsigned flags;
1721 static void *
1722 t_hash_next(struct seq_file *m, loff_t *pos)
1724 struct ftrace_iterator *iter = m->private;
1725 struct hlist_node *hnd = NULL;
1726 struct hlist_head *hhd;
1728 (*pos)++;
1729 iter->pos = *pos;
1731 if (iter->probe)
1732 hnd = &iter->probe->node;
1733 retry:
1734 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1735 return NULL;
1737 hhd = &ftrace_func_hash[iter->hidx];
1739 if (hlist_empty(hhd)) {
1740 iter->hidx++;
1741 hnd = NULL;
1742 goto retry;
1745 if (!hnd)
1746 hnd = hhd->first;
1747 else {
1748 hnd = hnd->next;
1749 if (!hnd) {
1750 iter->hidx++;
1751 goto retry;
1755 if (WARN_ON_ONCE(!hnd))
1756 return NULL;
1758 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1760 return iter;
1763 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1765 struct ftrace_iterator *iter = m->private;
1766 void *p = NULL;
1767 loff_t l;
1769 if (iter->func_pos > *pos)
1770 return NULL;
1772 iter->hidx = 0;
1773 for (l = 0; l <= (*pos - iter->func_pos); ) {
1774 p = t_hash_next(m, &l);
1775 if (!p)
1776 break;
1778 if (!p)
1779 return NULL;
1781 /* Only set this if we have an item */
1782 iter->flags |= FTRACE_ITER_HASH;
1784 return iter;
1787 static int
1788 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
1790 struct ftrace_func_probe *rec;
1792 rec = iter->probe;
1793 if (WARN_ON_ONCE(!rec))
1794 return -EIO;
1796 if (rec->ops->print)
1797 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1799 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1801 if (rec->data)
1802 seq_printf(m, ":%p", rec->data);
1803 seq_putc(m, '\n');
1805 return 0;
1808 static void *
1809 t_next(struct seq_file *m, void *v, loff_t *pos)
1811 struct ftrace_iterator *iter = m->private;
1812 struct ftrace_ops *ops = &global_ops;
1813 struct dyn_ftrace *rec = NULL;
1815 if (unlikely(ftrace_disabled))
1816 return NULL;
1818 if (iter->flags & FTRACE_ITER_HASH)
1819 return t_hash_next(m, pos);
1821 (*pos)++;
1822 iter->pos = iter->func_pos = *pos;
1824 if (iter->flags & FTRACE_ITER_PRINTALL)
1825 return t_hash_start(m, pos);
1827 retry:
1828 if (iter->idx >= iter->pg->index) {
1829 if (iter->pg->next) {
1830 iter->pg = iter->pg->next;
1831 iter->idx = 0;
1832 goto retry;
1834 } else {
1835 rec = &iter->pg->records[iter->idx++];
1836 if ((rec->flags & FTRACE_FL_FREE) ||
1838 ((iter->flags & FTRACE_ITER_FILTER) &&
1839 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
1841 ((iter->flags & FTRACE_ITER_NOTRACE) &&
1842 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
1844 ((iter->flags & FTRACE_ITER_ENABLED) &&
1845 !(rec->flags & ~FTRACE_FL_MASK))) {
1847 rec = NULL;
1848 goto retry;
1852 if (!rec)
1853 return t_hash_start(m, pos);
1855 iter->func = rec;
1857 return iter;
1860 static void reset_iter_read(struct ftrace_iterator *iter)
1862 iter->pos = 0;
1863 iter->func_pos = 0;
1864 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
1867 static void *t_start(struct seq_file *m, loff_t *pos)
1869 struct ftrace_iterator *iter = m->private;
1870 struct ftrace_ops *ops = &global_ops;
1871 void *p = NULL;
1872 loff_t l;
1874 mutex_lock(&ftrace_lock);
1876 if (unlikely(ftrace_disabled))
1877 return NULL;
1880 * If an lseek was done, then reset and start from beginning.
1882 if (*pos < iter->pos)
1883 reset_iter_read(iter);
1886 * For set_ftrace_filter reading, if we have the filter
1887 * off, we can short cut and just print out that all
1888 * functions are enabled.
1890 if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
1891 if (*pos > 0)
1892 return t_hash_start(m, pos);
1893 iter->flags |= FTRACE_ITER_PRINTALL;
1894 /* reset in case of seek/pread */
1895 iter->flags &= ~FTRACE_ITER_HASH;
1896 return iter;
1899 if (iter->flags & FTRACE_ITER_HASH)
1900 return t_hash_start(m, pos);
1903 * Unfortunately, we need to restart at ftrace_pages_start
1904 * every time we let go of the ftrace_lock. This is because
1905 * those pointers can change without the lock.
1907 iter->pg = ftrace_pages_start;
1908 iter->idx = 0;
1909 for (l = 0; l <= *pos; ) {
1910 p = t_next(m, p, &l);
1911 if (!p)
1912 break;
1915 if (!p) {
1916 if (iter->flags & FTRACE_ITER_FILTER)
1917 return t_hash_start(m, pos);
1919 return NULL;
1922 return iter;
1925 static void t_stop(struct seq_file *m, void *p)
1927 mutex_unlock(&ftrace_lock);
1930 static int t_show(struct seq_file *m, void *v)
1932 struct ftrace_iterator *iter = m->private;
1933 struct dyn_ftrace *rec;
1935 if (iter->flags & FTRACE_ITER_HASH)
1936 return t_hash_show(m, iter);
1938 if (iter->flags & FTRACE_ITER_PRINTALL) {
1939 seq_printf(m, "#### all functions enabled ####\n");
1940 return 0;
1943 rec = iter->func;
1945 if (!rec)
1946 return 0;
1948 seq_printf(m, "%ps", (void *)rec->ip);
1949 if (iter->flags & FTRACE_ITER_ENABLED)
1950 seq_printf(m, " (%ld)",
1951 rec->flags & ~FTRACE_FL_MASK);
1952 seq_printf(m, "\n");
1954 return 0;
1957 static const struct seq_operations show_ftrace_seq_ops = {
1958 .start = t_start,
1959 .next = t_next,
1960 .stop = t_stop,
1961 .show = t_show,
1964 static int
1965 ftrace_avail_open(struct inode *inode, struct file *file)
1967 struct ftrace_iterator *iter;
1968 int ret;
1970 if (unlikely(ftrace_disabled))
1971 return -ENODEV;
1973 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1974 if (!iter)
1975 return -ENOMEM;
1977 iter->pg = ftrace_pages_start;
1979 ret = seq_open(file, &show_ftrace_seq_ops);
1980 if (!ret) {
1981 struct seq_file *m = file->private_data;
1983 m->private = iter;
1984 } else {
1985 kfree(iter);
1988 return ret;
1991 static int
1992 ftrace_enabled_open(struct inode *inode, struct file *file)
1994 struct ftrace_iterator *iter;
1995 int ret;
1997 if (unlikely(ftrace_disabled))
1998 return -ENODEV;
2000 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2001 if (!iter)
2002 return -ENOMEM;
2004 iter->pg = ftrace_pages_start;
2005 iter->flags = FTRACE_ITER_ENABLED;
2007 ret = seq_open(file, &show_ftrace_seq_ops);
2008 if (!ret) {
2009 struct seq_file *m = file->private_data;
2011 m->private = iter;
2012 } else {
2013 kfree(iter);
2016 return ret;
2019 static void ftrace_filter_reset(struct ftrace_hash *hash)
2021 mutex_lock(&ftrace_lock);
2022 ftrace_hash_clear(hash);
2023 mutex_unlock(&ftrace_lock);
2026 static int
2027 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2028 struct inode *inode, struct file *file)
2030 struct ftrace_iterator *iter;
2031 struct ftrace_hash *hash;
2032 int ret = 0;
2034 if (unlikely(ftrace_disabled))
2035 return -ENODEV;
2037 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2038 if (!iter)
2039 return -ENOMEM;
2041 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2042 kfree(iter);
2043 return -ENOMEM;
2046 if (flag & FTRACE_ITER_NOTRACE)
2047 hash = ops->notrace_hash;
2048 else
2049 hash = ops->filter_hash;
2051 iter->ops = ops;
2052 iter->flags = flag;
2054 if (file->f_mode & FMODE_WRITE) {
2055 mutex_lock(&ftrace_lock);
2056 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2057 mutex_unlock(&ftrace_lock);
2059 if (!iter->hash) {
2060 trace_parser_put(&iter->parser);
2061 kfree(iter);
2062 return -ENOMEM;
2066 mutex_lock(&ftrace_regex_lock);
2068 if ((file->f_mode & FMODE_WRITE) &&
2069 (file->f_flags & O_TRUNC))
2070 ftrace_filter_reset(iter->hash);
2072 if (file->f_mode & FMODE_READ) {
2073 iter->pg = ftrace_pages_start;
2075 ret = seq_open(file, &show_ftrace_seq_ops);
2076 if (!ret) {
2077 struct seq_file *m = file->private_data;
2078 m->private = iter;
2079 } else {
2080 /* Failed */
2081 free_ftrace_hash(iter->hash);
2082 trace_parser_put(&iter->parser);
2083 kfree(iter);
2085 } else
2086 file->private_data = iter;
2087 mutex_unlock(&ftrace_regex_lock);
2089 return ret;
2092 static int
2093 ftrace_filter_open(struct inode *inode, struct file *file)
2095 return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2096 inode, file);
2099 static int
2100 ftrace_notrace_open(struct inode *inode, struct file *file)
2102 return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2103 inode, file);
2106 static loff_t
2107 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2109 loff_t ret;
2111 if (file->f_mode & FMODE_READ)
2112 ret = seq_lseek(file, offset, origin);
2113 else
2114 file->f_pos = ret = 1;
2116 return ret;
2119 static int ftrace_match(char *str, char *regex, int len, int type)
2121 int matched = 0;
2122 int slen;
2124 switch (type) {
2125 case MATCH_FULL:
2126 if (strcmp(str, regex) == 0)
2127 matched = 1;
2128 break;
2129 case MATCH_FRONT_ONLY:
2130 if (strncmp(str, regex, len) == 0)
2131 matched = 1;
2132 break;
2133 case MATCH_MIDDLE_ONLY:
2134 if (strstr(str, regex))
2135 matched = 1;
2136 break;
2137 case MATCH_END_ONLY:
2138 slen = strlen(str);
2139 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2140 matched = 1;
2141 break;
2144 return matched;
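/*
 * Glob-to-type mapping for reference (filter_parse_regex() does the
 * parsing): "foo" is MATCH_FULL, "foo*" MATCH_FRONT_ONLY, "*foo"
 * MATCH_END_ONLY and "*foo*" MATCH_MIDDLE_ONLY. For example:
 *
 *	echo 'sched*' > set_ftrace_filter
 *
 * selects every function whose name starts with "sched".
 */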
2147 static int
2148 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2150 struct ftrace_func_entry *entry;
2151 int ret = 0;
2153 entry = ftrace_lookup_ip(hash, rec->ip);
2154 if (not) {
2155 /* Do nothing if it doesn't exist */
2156 if (!entry)
2157 return 0;
2159 free_hash_entry(hash, entry);
2160 } else {
2161 /* Do nothing if it exists */
2162 if (entry)
2163 return 0;
2165 ret = add_hash_entry(hash, rec->ip);
2167 return ret;
2170 static int
2171 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2172 char *regex, int len, int type)
2174 char str[KSYM_SYMBOL_LEN];
2175 char *modname;
2177 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2179 if (mod) {
2180 /* module lookup requires matching the module */
2181 if (!modname || strcmp(modname, mod))
2182 return 0;
2184 /* blank search means to match all funcs in the mod */
2185 if (!len)
2186 return 1;
2189 return ftrace_match(str, regex, len, type);
2192 static int
2193 match_records(struct ftrace_hash *hash, char *buff,
2194 int len, char *mod, int not)
2196 unsigned search_len = 0;
2197 struct ftrace_page *pg;
2198 struct dyn_ftrace *rec;
2199 int type = MATCH_FULL;
2200 char *search = buff;
2201 int found = 0;
2202 int ret;
2204 if (len) {
2205 type = filter_parse_regex(buff, len, &search, &not);
2206 search_len = strlen(search);
2209 mutex_lock(&ftrace_lock);
2211 if (unlikely(ftrace_disabled))
2212 goto out_unlock;
2214 do_for_each_ftrace_rec(pg, rec) {
2216 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2217 ret = enter_record(hash, rec, not);
2218 if (ret < 0) {
2219 found = ret;
2220 goto out_unlock;
2222 found = 1;
2224 } while_for_each_ftrace_rec();
2225 out_unlock:
2226 mutex_unlock(&ftrace_lock);
2228 return found;
2231 static int
2232 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2234 return match_records(hash, buff, len, NULL, 0);
2237 static int
2238 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2240 int not = 0;
2242 /* blank or '*' mean the same */
2243 if (strcmp(buff, "*") == 0)
2244 buff[0] = 0;
2246 /* handle the case of 'dont filter this module' */
2247 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2248 buff[0] = 0;
2249 not = 1;
2252 return match_records(hash, buff, strlen(buff), mod, not);
2256 * We register the module command as a template to show others how
2257 * to register a command as well.
2260 static int
2261 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
2263 struct ftrace_ops *ops = &global_ops;
2264 struct ftrace_hash *hash;
2265 char *mod;
2266 int ret = -EINVAL;
2269 * cmd == 'mod' because we only registered this func
2270 * for the 'mod' ftrace_func_command.
2271 * But if you register one func with multiple commands,
2272 * you can tell which command was used by the cmd
2273 * parameter.
2276 /* we must have a module name */
2277 if (!param)
2278 return ret;
2280 mod = strsep(&param, ":");
2281 if (!strlen(mod))
2282 return ret;
2284 if (enable)
2285 hash = ops->filter_hash;
2286 else
2287 hash = ops->notrace_hash;
2289 ret = ftrace_match_module_records(hash, func, mod);
2290 if (!ret)
2291 ret = -EINVAL;
2292 if (ret < 0)
2293 return ret;
2295 return 0;
2298 static struct ftrace_func_command ftrace_mod_cmd = {
2299 .name = "mod",
2300 .func = ftrace_mod_callback,
2303 static int __init ftrace_mod_cmd_init(void)
2305 return register_ftrace_command(&ftrace_mod_cmd);
2307 device_initcall(ftrace_mod_cmd_init);
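/*
 * Example of the "mod" command from userspace (module name is
 * illustrative):
 *
 *	echo ':mod:ext4' > set_ftrace_filter
 *
 * filters in all functions of the ext4 module, while
 *
 *	echo 'write*:mod:ext4' > set_ftrace_filter
 *
 * narrows that to ext4 functions matching "write*".
 */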
2309 static void
2310 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2312 struct ftrace_func_probe *entry;
2313 struct hlist_head *hhd;
2314 struct hlist_node *n;
2315 unsigned long key;
2317 key = hash_long(ip, FTRACE_HASH_BITS);
2319 hhd = &ftrace_func_hash[key];
2321 if (hlist_empty(hhd))
2322 return;
2325 * Disable preemption for these calls to prevent an RCU grace
2326 * period. This syncs the hash iteration and freeing of items
2327 * on the hash. rcu_read_lock is too dangerous here.
2329 preempt_disable_notrace();
2330 hlist_for_each_entry_rcu(entry, n, hhd, node) {
2331 if (entry->ip == ip)
2332 entry->ops->func(ip, parent_ip, &entry->data);
2334 preempt_enable_notrace();
2337 static struct ftrace_ops trace_probe_ops __read_mostly =
2339 .func = function_trace_probe_call,
2342 static int ftrace_probe_registered;
2344 static void __enable_ftrace_function_probe(void)
2346 int i;
2348 if (ftrace_probe_registered)
2349 return;
2351 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2352 struct hlist_head *hhd = &ftrace_func_hash[i];
2353 if (hhd->first)
2354 break;
2356 /* Nothing registered? */
2357 if (i == FTRACE_FUNC_HASHSIZE)
2358 return;
2360 __register_ftrace_function(&trace_probe_ops);
2361 ftrace_startup(&global_ops, 0);
2362 ftrace_probe_registered = 1;
2365 static void __disable_ftrace_function_probe(void)
2367 int i;
2369 if (!ftrace_probe_registered)
2370 return;
2372 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2373 struct hlist_head *hhd = &ftrace_func_hash[i];
2374 if (hhd->first)
2375 return;
2378 /* no more funcs left */
2379 __unregister_ftrace_function(&trace_probe_ops);
2380 ftrace_shutdown(&global_ops, 0);
2381 ftrace_probe_registered = 0;
2385 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2387 struct ftrace_func_probe *entry =
2388 container_of(rhp, struct ftrace_func_probe, rcu);
2390 if (entry->ops->free)
2391 entry->ops->free(&entry->data);
2392 kfree(entry);
2397 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2398 void *data)
2400 struct ftrace_func_probe *entry;
2401 struct ftrace_page *pg;
2402 struct dyn_ftrace *rec;
2403 int type, len, not;
2404 unsigned long key;
2405 int count = 0;
2406 char *search;
2408 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2409 len = strlen(search);
2411 /* we do not support '!' for function probes */
2412 if (WARN_ON(not))
2413 return -EINVAL;
2415 mutex_lock(&ftrace_lock);
2417 if (unlikely(ftrace_disabled))
2418 goto out_unlock;
2420 do_for_each_ftrace_rec(pg, rec) {
2422 if (!ftrace_match_record(rec, NULL, search, len, type))
2423 continue;
2425 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2426 if (!entry) {
2427 /* If we did not process any, then return error */
2428 if (!count)
2429 count = -ENOMEM;
2430 goto out_unlock;
2433 count++;
2435 entry->data = data;
2438 * The caller might want to do something special
2439 * for each function we find. We call the callback
2440 * to give the caller an opportunity to do so.
2442 if (ops->callback) {
2443 if (ops->callback(rec->ip, &entry->data) < 0) {
2444 /* caller does not like this func */
2445 kfree(entry);
2446 continue;
2450 entry->ops = ops;
2451 entry->ip = rec->ip;
2453 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2454 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2456 } while_for_each_ftrace_rec();
2457 __enable_ftrace_function_probe();
2459 out_unlock:
2460 mutex_unlock(&ftrace_lock);
2462 return count;
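/*
 * These probes back commands such as traceon/traceoff that other
 * tracing code registers (they live in trace_functions.c, not
 * here); e.g., illustratively:
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *
 * hooks a probe on schedule() that turns tracing off when hit.
 */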
2465 enum {
2466 PROBE_TEST_FUNC = 1,
2467 PROBE_TEST_DATA = 2
2470 static void
2471 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2472 void *data, int flags)
2474 struct ftrace_func_probe *entry;
2475 struct hlist_node *n, *tmp;
2476 char str[KSYM_SYMBOL_LEN];
2477 int type = MATCH_FULL;
2478 int i, len = 0;
2479 char *search;
2481 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2482 glob = NULL;
2483 else if (glob) {
2484 int not;
2486 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2487 len = strlen(search);
2489 /* we do not support '!' for function probes */
2490 if (WARN_ON(not))
2491 return;
2494 mutex_lock(&ftrace_lock);
2495 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2496 struct hlist_head *hhd = &ftrace_func_hash[i];
2498 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2500 /* break up if statements for readability */
2501 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2502 continue;
2504 if ((flags & PROBE_TEST_DATA) && entry->data != data)
2505 continue;
2507 /* do this last, since it is the most expensive */
2508 if (glob) {
2509 kallsyms_lookup(entry->ip, NULL, NULL,
2510 NULL, str);
2511 if (!ftrace_match(str, glob, len, type))
2512 continue;
2515 hlist_del(&entry->node);
2516 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2519 __disable_ftrace_function_probe();
2520 mutex_unlock(&ftrace_lock);
2523 void
2524 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2525 void *data)
2527 __unregister_ftrace_function_probe(glob, ops, data,
2528 PROBE_TEST_FUNC | PROBE_TEST_DATA);
2531 void
2532 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2534 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2537 void unregister_ftrace_function_probe_all(char *glob)
2539 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2542 static LIST_HEAD(ftrace_commands);
2543 static DEFINE_MUTEX(ftrace_cmd_mutex);
2545 int register_ftrace_command(struct ftrace_func_command *cmd)
2547 struct ftrace_func_command *p;
2548 int ret = 0;
2550 mutex_lock(&ftrace_cmd_mutex);
2551 list_for_each_entry(p, &ftrace_commands, list) {
2552 if (strcmp(cmd->name, p->name) == 0) {
2553 ret = -EBUSY;
2554 goto out_unlock;
2557 list_add(&cmd->list, &ftrace_commands);
2558 out_unlock:
2559 mutex_unlock(&ftrace_cmd_mutex);
2561 return ret;
2564 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2566 struct ftrace_func_command *p, *n;
2567 int ret = -ENODEV;
2569 mutex_lock(&ftrace_cmd_mutex);
2570 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2571 if (strcmp(cmd->name, p->name) == 0) {
2572 ret = 0;
2573 list_del_init(&p->list);
2574 goto out_unlock;
2577 out_unlock:
2578 mutex_unlock(&ftrace_cmd_mutex);
2580 return ret;
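/*
 * Example (editor's sketch): registering a custom filter command.
 * Everything named "my_*" is hypothetical. Once registered, a write of
 * "do_fork:mycmd:arg" to set_ftrace_filter is routed to my_cmd_func()
 * by ftrace_process_regex() below.
 */
#if 0
static int my_cmd_func(char *func, char *cmd, char *param, int enable)
{
	/* @func is the glob, @cmd is "mycmd", @param is "arg" (or NULL) */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name = "mycmd",
	.func = my_cmd_func,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
#endif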
2583 static int ftrace_process_regex(struct ftrace_hash *hash,
2584 char *buff, int len, int enable)
2586 char *func, *command, *next = buff;
2587 struct ftrace_func_command *p;
2588 int ret;
2590 func = strsep(&next, ":");
2592 if (!next) {
2593 ret = ftrace_match_records(hash, func, len);
2594 if (!ret)
2595 ret = -EINVAL;
2596 if (ret < 0)
2597 return ret;
2598 return 0;
2601 /* command found */
2603 command = strsep(&next, ":");
2605 mutex_lock(&ftrace_cmd_mutex);
2606 list_for_each_entry(p, &ftrace_commands, list) {
2607 if (strcmp(p->name, command) == 0) {
2608 ret = p->func(func, command, next, enable);
2609 goto out_unlock;
2612 out_unlock:
2613 mutex_unlock(&ftrace_cmd_mutex);
2615 return ret;
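/*
 * Editor's note on the syntax parsed above: a write of "func" alone is
 * treated as a plain filter pattern, while "func:command[:param]" is
 * dispatched to a registered ftrace_func_command. For example,
 * "schedule:mycmd:1" splits into func="schedule", command="mycmd" and
 * next="1" ("mycmd" being a hypothetical registered command).
 */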
2618 static ssize_t
2619 ftrace_regex_write(struct file *file, const char __user *ubuf,
2620 size_t cnt, loff_t *ppos, int enable)
2622 struct ftrace_iterator *iter;
2623 struct trace_parser *parser;
2624 ssize_t ret, read;
2626 if (!cnt)
2627 return 0;
2629 mutex_lock(&ftrace_regex_lock);
2631 ret = -ENODEV;
2632 if (unlikely(ftrace_disabled))
2633 goto out_unlock;
2635 if (file->f_mode & FMODE_READ) {
2636 struct seq_file *m = file->private_data;
2637 iter = m->private;
2638 } else
2639 iter = file->private_data;
2641 parser = &iter->parser;
2642 read = trace_get_user(parser, ubuf, cnt, ppos);
2644 if (read >= 0 && trace_parser_loaded(parser) &&
2645 !trace_parser_cont(parser)) {
2646 ret = ftrace_process_regex(iter->hash, parser->buffer,
2647 parser->idx, enable);
2648 trace_parser_clear(parser);
2649 if (ret)
2650 goto out_unlock;
2653 ret = read;
2654 out_unlock:
2655 mutex_unlock(&ftrace_regex_lock);
2657 return ret;
2660 static ssize_t
2661 ftrace_filter_write(struct file *file, const char __user *ubuf,
2662 size_t cnt, loff_t *ppos)
2664 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2667 static ssize_t
2668 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2669 size_t cnt, loff_t *ppos)
2671 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2674 static int
2675 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2676 int reset, int enable)
2678 struct ftrace_hash **orig_hash;
2679 struct ftrace_hash *hash;
2680 int ret;
2682 if (unlikely(ftrace_disabled))
2683 return -ENODEV;
2685 if (enable)
2686 orig_hash = &ops->filter_hash;
2687 else
2688 orig_hash = &ops->notrace_hash;
2690 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2691 if (!hash)
2692 return -ENOMEM;
2694 mutex_lock(&ftrace_regex_lock);
2695 if (reset)
2696 ftrace_filter_reset(hash);
2697 if (buf)
2698 ftrace_match_records(hash, buf, len);
2700 mutex_lock(&ftrace_lock);
2701 ret = ftrace_hash_move(orig_hash, hash);
2702 mutex_unlock(&ftrace_lock);
2704 mutex_unlock(&ftrace_regex_lock);
2706 free_ftrace_hash(hash);
2707 return ret;
2711 * ftrace_set_filter - set a function to filter on in ftrace
2712 * @buf - the string that holds the function filter text.
2713 * @len - the length of the string.
2714 * @reset - non-zero to reset all filters before applying this filter.
2716 * Filters denote which functions should be enabled when tracing is enabled.
2717 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2719 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2721 ftrace_set_regex(&global_ops, buf, len, reset, 1);
2725 * ftrace_set_notrace - set a function to not trace in ftrace
2726 * @buf - the string that holds the function notrace text.
2727 * @len - the length of the string.
2728 * @reset - non-zero to reset all filters before applying this filter.
2730 * Notrace filters denote which functions should not be traced when tracing
2731 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2732 * for tracing.
2734 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2736 ftrace_set_regex(&global_ops, buf, len, reset, 0);
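/*
 * Example (editor's sketch): in-kernel use of the two setters above.
 * The patterns and the "my_filter_setup" name are illustrative only.
 */
#if 0
static void __init my_filter_setup(void)
{
	/* trace only scheduler entry points, dropping any earlier filter */
	ftrace_set_filter((unsigned char *)"sched_*", strlen("sched_*"), 1);

	/* additionally suppress tracing of preempt_schedule() */
	ftrace_set_notrace((unsigned char *)"preempt_schedule",
			   strlen("preempt_schedule"), 0);
}
#endif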
2740 * Command line interface to allow users to set filters at boot time.
2742 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
2743 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2744 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2746 static int __init set_ftrace_notrace(char *str)
2748 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2749 return 1;
2751 __setup("ftrace_notrace=", set_ftrace_notrace);
2753 static int __init set_ftrace_filter(char *str)
2755 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2756 return 1;
2758 __setup("ftrace_filter=", set_ftrace_filter);
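/*
 * Example (editor's note): booting with
 *   ftrace_filter=kmalloc,kfree ftrace_notrace=rcu_read_lock
 * fills the two buffers above; set_ftrace_early_filters() below then
 * splits each list on ',' and applies it before tracing starts.
 */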
2760 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2761 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2762 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2764 static int __init set_graph_function(char *str)
2766 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2767 return 1;
2769 __setup("ftrace_graph_filter=", set_graph_function);
2771 static void __init set_ftrace_early_graph(char *buf)
2773 int ret;
2774 char *func;
2776 while (buf) {
2777 func = strsep(&buf, ",");
2778 /* we allow only one expression at a time */
2779 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2780 func);
2781 if (ret)
2782 printk(KERN_DEBUG "ftrace: function %s not "
2783 "traceable\n", func);
2786 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2788 static void __init
2789 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2791 char *func;
2793 while (buf) {
2794 func = strsep(&buf, ",");
2795 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2799 static void __init set_ftrace_early_filters(void)
2801 if (ftrace_filter_buf[0])
2802 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
2803 if (ftrace_notrace_buf[0])
2804 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
2805 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2806 if (ftrace_graph_buf[0])
2807 set_ftrace_early_graph(ftrace_graph_buf);
2808 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2811 static int
2812 ftrace_regex_release(struct inode *inode, struct file *file)
2814 struct seq_file *m = (struct seq_file *)file->private_data;
2815 struct ftrace_iterator *iter;
2816 struct ftrace_hash **orig_hash;
2817 struct trace_parser *parser;
2818 int filter_hash;
2819 int ret;
2821 mutex_lock(&ftrace_regex_lock);
2822 if (file->f_mode & FMODE_READ) {
2823 iter = m->private;
2825 seq_release(inode, file);
2826 } else
2827 iter = file->private_data;
2829 parser = &iter->parser;
2830 if (trace_parser_loaded(parser)) {
2831 parser->buffer[parser->idx] = 0;
2832 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
2835 trace_parser_put(parser);
2837 if (file->f_mode & FMODE_WRITE) {
2838 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
2840 if (filter_hash)
2841 orig_hash = &iter->ops->filter_hash;
2842 else
2843 orig_hash = &iter->ops->notrace_hash;
2845 mutex_lock(&ftrace_lock);
2847 * Remove the current set, update the hash and add
2848 * them back.
2850 ftrace_hash_rec_disable(iter->ops, filter_hash);
2851 ret = ftrace_hash_move(orig_hash, iter->hash);
2852 if (!ret) {
2853 ftrace_hash_rec_enable(iter->ops, filter_hash);
2854 if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
2855 && ftrace_enabled)
2856 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2858 mutex_unlock(&ftrace_lock);
2860 free_ftrace_hash(iter->hash);
2861 kfree(iter);
2863 mutex_unlock(&ftrace_regex_lock);
2864 return 0;
2867 static const struct file_operations ftrace_avail_fops = {
2868 .open = ftrace_avail_open,
2869 .read = seq_read,
2870 .llseek = seq_lseek,
2871 .release = seq_release_private,
2874 static const struct file_operations ftrace_enabled_fops = {
2875 .open = ftrace_enabled_open,
2876 .read = seq_read,
2877 .llseek = seq_lseek,
2878 .release = seq_release_private,
2881 static const struct file_operations ftrace_filter_fops = {
2882 .open = ftrace_filter_open,
2883 .read = seq_read,
2884 .write = ftrace_filter_write,
2885 .llseek = ftrace_regex_lseek,
2886 .release = ftrace_regex_release,
2889 static const struct file_operations ftrace_notrace_fops = {
2890 .open = ftrace_notrace_open,
2891 .read = seq_read,
2892 .write = ftrace_notrace_write,
2893 .llseek = ftrace_regex_lseek,
2894 .release = ftrace_regex_release,
2897 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2899 static DEFINE_MUTEX(graph_lock);
2901 int ftrace_graph_count;
2902 int ftrace_graph_filter_enabled;
2903 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2905 static void *
2906 __g_next(struct seq_file *m, loff_t *pos)
2908 if (*pos >= ftrace_graph_count)
2909 return NULL;
2910 return &ftrace_graph_funcs[*pos];
2913 static void *
2914 g_next(struct seq_file *m, void *v, loff_t *pos)
2916 (*pos)++;
2917 return __g_next(m, pos);
2920 static void *g_start(struct seq_file *m, loff_t *pos)
2922 mutex_lock(&graph_lock);
2924 /* No filter is set; tell g_show to report that all functions are enabled */
2925 if (!ftrace_graph_filter_enabled && !*pos)
2926 return (void *)1;
2928 return __g_next(m, pos);
2931 static void g_stop(struct seq_file *m, void *p)
2933 mutex_unlock(&graph_lock);
2936 static int g_show(struct seq_file *m, void *v)
2938 unsigned long *ptr = v;
2940 if (!ptr)
2941 return 0;
2943 if (ptr == (unsigned long *)1) {
2944 seq_printf(m, "#### all functions enabled ####\n");
2945 return 0;
2948 seq_printf(m, "%ps\n", (void *)*ptr);
2950 return 0;
2953 static const struct seq_operations ftrace_graph_seq_ops = {
2954 .start = g_start,
2955 .next = g_next,
2956 .stop = g_stop,
2957 .show = g_show,
2960 static int
2961 ftrace_graph_open(struct inode *inode, struct file *file)
2963 int ret = 0;
2965 if (unlikely(ftrace_disabled))
2966 return -ENODEV;
2968 mutex_lock(&graph_lock);
2969 if ((file->f_mode & FMODE_WRITE) &&
2970 (file->f_flags & O_TRUNC)) {
2971 ftrace_graph_filter_enabled = 0;
2972 ftrace_graph_count = 0;
2973 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2975 mutex_unlock(&graph_lock);
2977 if (file->f_mode & FMODE_READ)
2978 ret = seq_open(file, &ftrace_graph_seq_ops);
2980 return ret;
2983 static int
2984 ftrace_graph_release(struct inode *inode, struct file *file)
2986 if (file->f_mode & FMODE_READ)
2987 seq_release(inode, file);
2988 return 0;
2991 static int
2992 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2994 struct dyn_ftrace *rec;
2995 struct ftrace_page *pg;
2996 int search_len;
2997 int fail = 1;
2998 int type, not;
2999 char *search;
3000 bool exists;
3001 int i;
3003 /* decode regex */
3004 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3005 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3006 return -EBUSY;
3008 search_len = strlen(search);
3010 mutex_lock(&ftrace_lock);
3012 if (unlikely(ftrace_disabled)) {
3013 mutex_unlock(&ftrace_lock);
3014 return -ENODEV;
3017 do_for_each_ftrace_rec(pg, rec) {
3019 if (rec->flags & FTRACE_FL_FREE)
3020 continue;
3022 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3023 /* if it is in the array */
3024 exists = false;
3025 for (i = 0; i < *idx; i++) {
3026 if (array[i] == rec->ip) {
3027 exists = true;
3028 break;
3032 if (!not) {
3033 fail = 0;
3034 if (!exists) {
3035 array[(*idx)++] = rec->ip;
3036 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3037 goto out;
3039 } else {
3040 if (exists) {
3041 array[i] = array[--(*idx)];
3042 array[*idx] = 0;
3043 fail = 0;
3047 } while_for_each_ftrace_rec();
3048 out:
3049 mutex_unlock(&ftrace_lock);
3051 if (fail)
3052 return -EINVAL;
3054 ftrace_graph_filter_enabled = 1;
3055 return 0;
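/*
 * Editor's note: ftrace_set_func() implements both addition and removal.
 * Writing "schedule" to set_graph_function appends schedule()'s address
 * to @array; writing "!schedule" finds and removes it, compacting the
 * array by moving the last entry into the freed slot.
 */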
3058 static ssize_t
3059 ftrace_graph_write(struct file *file, const char __user *ubuf,
3060 size_t cnt, loff_t *ppos)
3062 struct trace_parser parser;
3063 ssize_t read, ret;
3065 if (!cnt)
3066 return 0;
3068 mutex_lock(&graph_lock);
3070 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3071 ret = -ENOMEM;
3072 goto out_unlock;
3075 read = trace_get_user(&parser, ubuf, cnt, ppos);
3077 if (read >= 0 && trace_parser_loaded(&parser)) {
3078 parser.buffer[parser.idx] = 0;
3080 /* we allow only one expression at a time */
3081 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3082 parser.buffer);
3083 if (ret)
3084 goto out_free;
3087 ret = read;
3089 out_free:
3090 trace_parser_put(&parser);
3091 out_unlock:
3092 mutex_unlock(&graph_lock);
3094 return ret;
3097 static const struct file_operations ftrace_graph_fops = {
3098 .open = ftrace_graph_open,
3099 .read = seq_read,
3100 .write = ftrace_graph_write,
3101 .release = ftrace_graph_release,
3102 .llseek = seq_lseek,
3104 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3106 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3109 trace_create_file("available_filter_functions", 0444,
3110 d_tracer, NULL, &ftrace_avail_fops);
3112 trace_create_file("enabled_functions", 0444,
3113 d_tracer, NULL, &ftrace_enabled_fops);
3115 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3116 NULL, &ftrace_filter_fops);
3118 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3119 NULL, &ftrace_notrace_fops);
3121 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3122 trace_create_file("set_graph_function", 0644, d_tracer,
3123 NULL,
3124 &ftrace_graph_fops);
3125 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3127 return 0;
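/*
 * Example usage from user space (editor's sketch; assumes debugfs is
 * mounted at /sys/kernel/debug and d_tracer is the "tracing" directory):
 *
 *   cat  /sys/kernel/debug/tracing/available_filter_functions
 *   echo 'sys_nanosleep'  >  /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo '!sys_nanosleep' >> /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo 'schedule'       >  /sys/kernel/debug/tracing/set_graph_function
 */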
3130 static int ftrace_process_locs(struct module *mod,
3131 unsigned long *start,
3132 unsigned long *end)
3134 unsigned long *p;
3135 unsigned long addr;
3137 mutex_lock(&ftrace_lock);
3138 p = start;
3139 while (p < end) {
3140 addr = ftrace_call_adjust(*p++);
3142 * Some architecture linkers will pad between
3143 * the different mcount_loc sections of different
3144 * object files to satisfy alignments.
3145 * Skip any NULL pointers.
3147 if (!addr)
3148 continue;
3149 ftrace_record_ip(addr);
3152 ftrace_update_code(mod);
3153 mutex_unlock(&ftrace_lock);
3155 return 0;
3158 #ifdef CONFIG_MODULES
3159 void ftrace_release_mod(struct module *mod)
3161 struct dyn_ftrace *rec;
3162 struct ftrace_page *pg;
3164 mutex_lock(&ftrace_lock);
3166 if (ftrace_disabled)
3167 goto out_unlock;
3169 do_for_each_ftrace_rec(pg, rec) {
3170 if (within_module_core(rec->ip, mod)) {
3172 * rec->ip is changed in ftrace_free_rec(); it should no longer
3173 * fall within the module's text range if the record was freed.
3175 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3176 ftrace_free_rec(rec);
3178 } while_for_each_ftrace_rec();
3179 out_unlock:
3180 mutex_unlock(&ftrace_lock);
3183 static void ftrace_init_module(struct module *mod,
3184 unsigned long *start, unsigned long *end)
3186 if (ftrace_disabled || start == end)
3187 return;
3188 ftrace_process_locs(mod, start, end);
3191 static int ftrace_module_notify(struct notifier_block *self,
3192 unsigned long val, void *data)
3194 struct module *mod = data;
3196 switch (val) {
3197 case MODULE_STATE_COMING:
3198 ftrace_init_module(mod, mod->ftrace_callsites,
3199 mod->ftrace_callsites +
3200 mod->num_ftrace_callsites);
3201 break;
3202 case MODULE_STATE_GOING:
3203 ftrace_release_mod(mod);
3204 break;
3207 return 0;
3209 #else
3210 static int ftrace_module_notify(struct notifier_block *self,
3211 unsigned long val, void *data)
3213 return 0;
3215 #endif /* CONFIG_MODULES */
3217 struct notifier_block ftrace_module_nb = {
3218 .notifier_call = ftrace_module_notify,
3219 .priority = 0,
3222 extern unsigned long __start_mcount_loc[];
3223 extern unsigned long __stop_mcount_loc[];
3225 void __init ftrace_init(void)
3227 unsigned long count, addr, flags;
3228 int ret;
3230 /* Keep the ftrace pointer to the stub */
3231 addr = (unsigned long)ftrace_stub;
3233 local_irq_save(flags);
3234 ftrace_dyn_arch_init(&addr);
3235 local_irq_restore(flags);
3237 /* ftrace_dyn_arch_init places the return code in addr */
3238 if (addr)
3239 goto failed;
3241 count = __stop_mcount_loc - __start_mcount_loc;
3243 ret = ftrace_dyn_table_alloc(count);
3244 if (ret)
3245 goto failed;
3247 last_ftrace_enabled = ftrace_enabled = 1;
3249 ret = ftrace_process_locs(NULL,
3250 __start_mcount_loc,
3251 __stop_mcount_loc);
3253 ret = register_module_notifier(&ftrace_module_nb);
3254 if (ret)
3255 pr_warning("Failed to register ftrace module notifier\n");
3257 set_ftrace_early_filters();
3259 return;
3260 failed:
3261 ftrace_disabled = 1;
3264 #else
3266 struct ftrace_ops global_ops = {
3267 .func = ftrace_stub,
3270 static int __init ftrace_nodyn_init(void)
3272 ftrace_enabled = 1;
3273 return 0;
3275 device_initcall(ftrace_nodyn_init);
3277 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3278 static inline void ftrace_startup_enable(int command) { }
3279 /* Keep as macros so we do not need to define the commands */
3280 # define ftrace_startup(ops, command) do { } while (0)
3281 # define ftrace_shutdown(ops, command) do { } while (0)
3282 # define ftrace_startup_sysctl() do { } while (0)
3283 # define ftrace_shutdown_sysctl() do { } while (0)
3284 #endif /* CONFIG_DYNAMIC_FTRACE */
3286 static void clear_ftrace_swapper(void)
3288 struct task_struct *p;
3289 int cpu;
3291 get_online_cpus();
3292 for_each_online_cpu(cpu) {
3293 p = idle_task(cpu);
3294 clear_tsk_trace_trace(p);
3296 put_online_cpus();
3299 static void set_ftrace_swapper(void)
3301 struct task_struct *p;
3302 int cpu;
3304 get_online_cpus();
3305 for_each_online_cpu(cpu) {
3306 p = idle_task(cpu);
3307 set_tsk_trace_trace(p);
3309 put_online_cpus();
3312 static void clear_ftrace_pid(struct pid *pid)
3314 struct task_struct *p;
3316 rcu_read_lock();
3317 do_each_pid_task(pid, PIDTYPE_PID, p) {
3318 clear_tsk_trace_trace(p);
3319 } while_each_pid_task(pid, PIDTYPE_PID, p);
3320 rcu_read_unlock();
3322 put_pid(pid);
3325 static void set_ftrace_pid(struct pid *pid)
3327 struct task_struct *p;
3329 rcu_read_lock();
3330 do_each_pid_task(pid, PIDTYPE_PID, p) {
3331 set_tsk_trace_trace(p);
3332 } while_each_pid_task(pid, PIDTYPE_PID, p);
3333 rcu_read_unlock();
3336 static void clear_ftrace_pid_task(struct pid *pid)
3338 if (pid == ftrace_swapper_pid)
3339 clear_ftrace_swapper();
3340 else
3341 clear_ftrace_pid(pid);
3344 static void set_ftrace_pid_task(struct pid *pid)
3346 if (pid == ftrace_swapper_pid)
3347 set_ftrace_swapper();
3348 else
3349 set_ftrace_pid(pid);
3352 static int ftrace_pid_add(int p)
3354 struct pid *pid;
3355 struct ftrace_pid *fpid;
3356 int ret = -EINVAL;
3358 mutex_lock(&ftrace_lock);
3360 if (!p)
3361 pid = ftrace_swapper_pid;
3362 else
3363 pid = find_get_pid(p);
3365 if (!pid)
3366 goto out;
3368 ret = 0;
3370 list_for_each_entry(fpid, &ftrace_pids, list)
3371 if (fpid->pid == pid)
3372 goto out_put;
3374 ret = -ENOMEM;
3376 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3377 if (!fpid)
3378 goto out_put;
3380 list_add(&fpid->list, &ftrace_pids);
3381 fpid->pid = pid;
3383 set_ftrace_pid_task(pid);
3385 ftrace_update_pid_func();
3386 ftrace_startup_enable(0);
3388 mutex_unlock(&ftrace_lock);
3389 return 0;
3391 out_put:
3392 if (pid != ftrace_swapper_pid)
3393 put_pid(pid);
3395 out:
3396 mutex_unlock(&ftrace_lock);
3397 return ret;
3400 static void ftrace_pid_reset(void)
3402 struct ftrace_pid *fpid, *safe;
3404 mutex_lock(&ftrace_lock);
3405 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3406 struct pid *pid = fpid->pid;
3408 clear_ftrace_pid_task(pid);
3410 list_del(&fpid->list);
3411 kfree(fpid);
3414 ftrace_update_pid_func();
3415 ftrace_startup_enable(0);
3417 mutex_unlock(&ftrace_lock);
3420 static void *fpid_start(struct seq_file *m, loff_t *pos)
3422 mutex_lock(&ftrace_lock);
3424 if (list_empty(&ftrace_pids) && (!*pos))
3425 return (void *) 1;
3427 return seq_list_start(&ftrace_pids, *pos);
3430 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3432 if (v == (void *)1)
3433 return NULL;
3435 return seq_list_next(v, &ftrace_pids, pos);
3438 static void fpid_stop(struct seq_file *m, void *p)
3440 mutex_unlock(&ftrace_lock);
3443 static int fpid_show(struct seq_file *m, void *v)
3445 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3447 if (v == (void *)1) {
3448 seq_printf(m, "no pid\n");
3449 return 0;
3452 if (fpid->pid == ftrace_swapper_pid)
3453 seq_printf(m, "swapper tasks\n");
3454 else
3455 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3457 return 0;
3460 static const struct seq_operations ftrace_pid_sops = {
3461 .start = fpid_start,
3462 .next = fpid_next,
3463 .stop = fpid_stop,
3464 .show = fpid_show,
3467 static int
3468 ftrace_pid_open(struct inode *inode, struct file *file)
3470 int ret = 0;
3472 if ((file->f_mode & FMODE_WRITE) &&
3473 (file->f_flags & O_TRUNC))
3474 ftrace_pid_reset();
3476 if (file->f_mode & FMODE_READ)
3477 ret = seq_open(file, &ftrace_pid_sops);
3479 return ret;
3482 static ssize_t
3483 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3484 size_t cnt, loff_t *ppos)
3486 char buf[64], *tmp;
3487 long val;
3488 int ret;
3490 if (cnt >= sizeof(buf))
3491 return -EINVAL;
3493 if (copy_from_user(&buf, ubuf, cnt))
3494 return -EFAULT;
3496 buf[cnt] = 0;
3499 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3500 * to clear the filter quietly.
3502 tmp = strstrip(buf);
3503 if (strlen(tmp) == 0)
3504 return 1;
3506 ret = strict_strtol(tmp, 10, &val);
3507 if (ret < 0)
3508 return ret;
3510 ret = ftrace_pid_add(val);
3512 return ret ? ret : cnt;
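/*
 * Example (editor's note): how the write handler above behaves.
 *
 *   echo 1234 > set_ftrace_pid    # trace only pid 1234
 *   echo 0    > set_ftrace_pid    # trace the per-cpu idle (swapper) tasks
 *   echo      > set_ftrace_pid    # O_TRUNC open resets the pid list
 */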
3515 static int
3516 ftrace_pid_release(struct inode *inode, struct file *file)
3518 if (file->f_mode & FMODE_READ)
3519 seq_release(inode, file);
3521 return 0;
3524 static const struct file_operations ftrace_pid_fops = {
3525 .open = ftrace_pid_open,
3526 .write = ftrace_pid_write,
3527 .read = seq_read,
3528 .llseek = seq_lseek,
3529 .release = ftrace_pid_release,
3532 static __init int ftrace_init_debugfs(void)
3534 struct dentry *d_tracer;
3536 d_tracer = tracing_init_dentry();
3537 if (!d_tracer)
3538 return 0;
3540 ftrace_init_dyn_debugfs(d_tracer);
3542 trace_create_file("set_ftrace_pid", 0644, d_tracer,
3543 NULL, &ftrace_pid_fops);
3545 ftrace_profile_debugfs(d_tracer);
3547 return 0;
3549 fs_initcall(ftrace_init_debugfs);
3552 * ftrace_kill - kill ftrace
3554 * This function should be used by panic code. It stops ftrace
3555 * but in a not so nice way. If you need to simply stop ftrace
3556 * from a non-atomic context, use unregister_ftrace_function() instead.
3558 void ftrace_kill(void)
3560 ftrace_disabled = 1;
3561 ftrace_enabled = 0;
3562 clear_ftrace_function();
3566 * register_ftrace_function - register a function for profiling
3567 * @ops - ops structure that holds the function for profiling.
3569 * Register a function to be called by all functions in the
3570 * kernel.
3572 * Note: @ops->func and all the functions it calls must be labeled
3573 * with "notrace", otherwise it will go into a
3574 * recursive loop.
3576 int register_ftrace_function(struct ftrace_ops *ops)
3578 int ret = -1;
3580 mutex_lock(&ftrace_lock);
3582 if (unlikely(ftrace_disabled))
3583 goto out_unlock;
3585 ret = __register_ftrace_function(ops);
3586 ftrace_startup(&global_ops, 0);
3588 out_unlock:
3589 mutex_unlock(&ftrace_lock);
3590 return ret;
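/*
 * Example (editor's sketch): a minimal in-kernel user of the API above.
 * "my_trace_func", "my_ops" and "my_tracer_init" are hypothetical names.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* called for every traced function; must never recurse into ftrace */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}
#endif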
3594 * unregister_ftrace_function - unregister a function for profiling.
3595 * @ops - ops structure that holds the function to unregister
3597 * Unregister a function that was added to be called by ftrace profiling.
3599 int unregister_ftrace_function(struct ftrace_ops *ops)
3601 int ret;
3603 mutex_lock(&ftrace_lock);
3604 ret = __unregister_ftrace_function(ops);
3605 ftrace_shutdown(&global_ops, 0);
3606 mutex_unlock(&ftrace_lock);
3608 return ret;
3612 int ftrace_enable_sysctl(struct ctl_table *table, int write,
3613 void __user *buffer, size_t *lenp,
3614 loff_t *ppos)
3616 int ret = -ENODEV;
3618 mutex_lock(&ftrace_lock);
3620 if (unlikely(ftrace_disabled))
3621 goto out;
3623 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3625 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3626 goto out;
3628 last_ftrace_enabled = !!ftrace_enabled;
3630 if (ftrace_enabled) {
3632 ftrace_startup_sysctl();
3634 /* we are starting ftrace again */
3635 if (ftrace_list != &ftrace_list_end) {
3636 if (ftrace_list->next == &ftrace_list_end)
3637 ftrace_trace_function = ftrace_list->func;
3638 else
3639 ftrace_trace_function = ftrace_list_func;
3642 } else {
3643 /* stopping ftrace calls (just send to ftrace_stub) */
3644 ftrace_trace_function = ftrace_stub;
3646 ftrace_shutdown_sysctl();
3649 out:
3650 mutex_unlock(&ftrace_lock);
3651 return ret;
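/*
 * Example (editor's note): this handler backs the kernel.ftrace_enabled
 * sysctl, so either of the following toggles the tracer at run time:
 *
 *   sysctl kernel.ftrace_enabled=0
 *   echo 1 > /proc/sys/kernel/ftrace_enabled
 */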
3654 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3656 static int ftrace_graph_active;
3657 static struct notifier_block ftrace_suspend_notifier;
3659 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3661 return 0;
3664 /* The callbacks that hook a function */
3665 trace_func_graph_ret_t ftrace_graph_return =
3666 (trace_func_graph_ret_t)ftrace_stub;
3667 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3669 /* Try to assign a return stack to each of up to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3670 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3672 int i;
3673 int ret = 0;
3674 unsigned long flags;
3675 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3676 struct task_struct *g, *t;
3678 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3679 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3680 * sizeof(struct ftrace_ret_stack),
3681 GFP_KERNEL);
3682 if (!ret_stack_list[i]) {
3683 start = 0;
3684 end = i;
3685 ret = -ENOMEM;
3686 goto free;
3690 read_lock_irqsave(&tasklist_lock, flags);
3691 do_each_thread(g, t) {
3692 if (start == end) {
3693 ret = -EAGAIN;
3694 goto unlock;
3697 if (t->ret_stack == NULL) {
3698 atomic_set(&t->tracing_graph_pause, 0);
3699 atomic_set(&t->trace_overrun, 0);
3700 t->curr_ret_stack = -1;
3701 /* Make sure the tasks see the -1 first: */
3702 smp_wmb();
3703 t->ret_stack = ret_stack_list[start++];
3705 } while_each_thread(g, t);
3707 unlock:
3708 read_unlock_irqrestore(&tasklist_lock, flags);
3709 free:
3710 for (i = start; i < end; i++)
3711 kfree(ret_stack_list[i]);
3712 return ret;
3715 static void
3716 ftrace_graph_probe_sched_switch(void *ignore,
3717 struct task_struct *prev, struct task_struct *next)
3719 unsigned long long timestamp;
3720 int index;
3723 * Does the user want to count the time a function was asleep?
3724 * If so, do not update the time stamps.
3726 if (trace_flags & TRACE_ITER_SLEEP_TIME)
3727 return;
3729 timestamp = trace_clock_local();
3731 prev->ftrace_timestamp = timestamp;
3733 /* only process tasks that we timestamped */
3734 if (!next->ftrace_timestamp)
3735 return;
3738 * Update all the counters in next to make up for the
3739 * time next was sleeping.
3741 timestamp -= next->ftrace_timestamp;
3743 for (index = next->curr_ret_stack; index >= 0; index--)
3744 next->ret_stack[index].calltime += timestamp;
3747 /* Allocate a return stack for each task */
3748 static int start_graph_tracing(void)
3750 struct ftrace_ret_stack **ret_stack_list;
3751 int ret, cpu;
3753 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3754 sizeof(struct ftrace_ret_stack *),
3755 GFP_KERNEL);
3757 if (!ret_stack_list)
3758 return -ENOMEM;
3760 /* The boot CPU's init_task->ret_stack will never be freed */
3761 for_each_online_cpu(cpu) {
3762 if (!idle_task(cpu)->ret_stack)
3763 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
3766 do {
3767 ret = alloc_retstack_tasklist(ret_stack_list);
3768 } while (ret == -EAGAIN);
3770 if (!ret) {
3771 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3772 if (ret)
3773 pr_info("ftrace_graph: Couldn't activate tracepoint"
3774 " probe to kernel_sched_switch\n");
3777 kfree(ret_stack_list);
3778 return ret;
3782 * Hibernation protection.
3783 * The state of the current task is too unstable during
3784 * suspend/restore to disk. We want to protect against that.
3786 static int
3787 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3788 void *unused)
3790 switch (state) {
3791 case PM_HIBERNATION_PREPARE:
3792 pause_graph_tracing();
3793 break;
3795 case PM_POST_HIBERNATION:
3796 unpause_graph_tracing();
3797 break;
3799 return NOTIFY_DONE;
3802 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3803 trace_func_graph_ent_t entryfunc)
3805 int ret = 0;
3807 mutex_lock(&ftrace_lock);
3809 /* we currently allow only one tracer registered at a time */
3810 if (ftrace_graph_active) {
3811 ret = -EBUSY;
3812 goto out;
3815 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3816 register_pm_notifier(&ftrace_suspend_notifier);
3818 ftrace_graph_active++;
3819 ret = start_graph_tracing();
3820 if (ret) {
3821 ftrace_graph_active--;
3822 goto out;
3825 ftrace_graph_return = retfunc;
3826 ftrace_graph_entry = entryfunc;
3828 ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
3830 out:
3831 mutex_unlock(&ftrace_lock);
3832 return ret;
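/*
 * Example (editor's sketch): hooking function entry and return.
 * "my_graph_entry", "my_graph_return" and "my_graph_init" are
 * hypothetical; a nonzero return from the entry callback asks for the
 * matching return hook to run.
 */
#if 0
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* record this function and hook its return */
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->calltime and trace->rettime bracket the call */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif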
3835 void unregister_ftrace_graph(void)
3837 mutex_lock(&ftrace_lock);
3839 if (unlikely(!ftrace_graph_active))
3840 goto out;
3842 ftrace_graph_active--;
3843 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3844 ftrace_graph_entry = ftrace_graph_entry_stub;
3845 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
3846 unregister_pm_notifier(&ftrace_suspend_notifier);
3847 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3849 out:
3850 mutex_unlock(&ftrace_lock);
3853 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
3855 static void
3856 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
3858 atomic_set(&t->tracing_graph_pause, 0);
3859 atomic_set(&t->trace_overrun, 0);
3860 t->ftrace_timestamp = 0;
3861 /* make curr_ret_stack visible before we add the ret_stack */
3862 smp_wmb();
3863 t->ret_stack = ret_stack;
3867 * Allocate a return stack for the idle task. This may be the first
3868 * time through, or it may be called again when a CPU comes online.
3870 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
3872 t->curr_ret_stack = -1;
3874 * The idle task has no parent; it either has its own
3875 * stack or no stack at all.
3877 if (t->ret_stack)
3878 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
3880 if (ftrace_graph_active) {
3881 struct ftrace_ret_stack *ret_stack;
3883 ret_stack = per_cpu(idle_ret_stack, cpu);
3884 if (!ret_stack) {
3885 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3886 * sizeof(struct ftrace_ret_stack),
3887 GFP_KERNEL);
3888 if (!ret_stack)
3889 return;
3890 per_cpu(idle_ret_stack, cpu) = ret_stack;
3892 graph_init_task(t, ret_stack);
3896 /* Allocate a return stack for newly created task */
3897 void ftrace_graph_init_task(struct task_struct *t)
3899 /* Make sure we do not use the parent ret_stack */
3900 t->ret_stack = NULL;
3901 t->curr_ret_stack = -1;
3903 if (ftrace_graph_active) {
3904 struct ftrace_ret_stack *ret_stack;
3906 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3907 * sizeof(struct ftrace_ret_stack),
3908 GFP_KERNEL);
3909 if (!ret_stack)
3910 return;
3911 graph_init_task(t, ret_stack);
3915 void ftrace_graph_exit_task(struct task_struct *t)
3917 struct ftrace_ret_stack *ret_stack = t->ret_stack;
3919 t->ret_stack = NULL;
3920 /* NULL must become visible to IRQs before we free it: */
3921 barrier();
3923 kfree(ret_stack);
3926 void ftrace_graph_stop(void)
3928 ftrace_stop();
3930 #endif