kernel/trace/ftrace.c
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
36 #include <trace/events/sched.h>
38 #include <asm/setup.h>
40 #include "trace_output.h"
41 #include "trace_stat.h"
43 #define FTRACE_WARN_ON(cond) \
44 ({ \
45 int ___r = cond; \
46 if (WARN_ON(___r)) \
47 ftrace_kill(); \
48 ___r; \
49 })
51 #define FTRACE_WARN_ON_ONCE(cond) \
52 ({ \
53 int ___r = cond; \
54 if (WARN_ON_ONCE(___r)) \
55 ftrace_kill(); \
56 ___r; \
57 })
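/*
 * Both macros evaluate to the tested condition (___r), so they can be
 * used directly inside a conditional, as the registration code later in
 * this file does, e.g.:
 *
 *	if (FTRACE_WARN_ON(ops == &global_ops))
 *		return -EINVAL;
 */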
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
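/*
 * For scale (follows directly from the defines above): the probe hash has
 * 1 << 7 = 128 buckets, a default filter/notrace hash gets 1 << 10 = 1024
 * buckets, and a rebuilt hash is capped at 1 << 12 = 4096 buckets.
 */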
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_REGEX_LOCK(opsname) \
69 .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
70 #else
71 #define INIT_REGEX_LOCK(opsname)
72 #endif
74 static struct ftrace_ops ftrace_list_end __read_mostly = {
75 .func = ftrace_stub,
76 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
79 /* ftrace_enabled is a method to turn ftrace on or off */
80 int ftrace_enabled __read_mostly;
81 static int last_ftrace_enabled;
83 /* Quick disabling of function tracer. */
84 int function_trace_stop __read_mostly;
86 /* Current function tracing op */
87 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
89 /* List for set_ftrace_pid's pids. */
90 LIST_HEAD(ftrace_pids);
91 struct ftrace_pid {
92 struct list_head list;
93 struct pid *pid;
97 * ftrace_disabled is set when an anomaly is discovered.
98 * ftrace_disabled is much stronger than ftrace_enabled.
100 static int ftrace_disabled __read_mostly;
102 static DEFINE_MUTEX(ftrace_lock);
104 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
105 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
106 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
107 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
108 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
109 static struct ftrace_ops global_ops;
110 static struct ftrace_ops control_ops;
112 #if ARCH_SUPPORTS_FTRACE_OPS
113 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
114 struct ftrace_ops *op, struct pt_regs *regs);
115 #else
116 /* See comment below, where ftrace_ops_list_func is defined */
117 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
118 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
119 #endif
122 * Traverse the ftrace_global_list, invoking all entries. The reason that we
123 * can use rcu_dereference_raw_notrace() is that elements removed from this list
124 * are simply leaked, so there is no need to interact with a grace-period
125 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
126 * concurrent insertions into the ftrace_global_list.
128 * Silly Alpha and silly pointer-speculation compiler optimizations!
130 #define do_for_each_ftrace_op(op, list) \
131 op = rcu_dereference_raw_notrace(list); \
135 * Optimized for just a single item in the list (as that is the normal case).
137 #define while_for_each_ftrace_op(op) \
138 while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
139 unlikely((op) != &ftrace_list_end))
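/*
 * The two macros above are meant to be used as a pair; an illustrative
 * walk over an ops list (the real callers appear further down) looks like:
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */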
141 static inline void ftrace_ops_init(struct ftrace_ops *ops)
143 #ifdef CONFIG_DYNAMIC_FTRACE
144 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
145 mutex_init(&ops->regex_lock);
146 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
148 #endif
152 * ftrace_nr_registered_ops - return number of ops registered
154 * Returns the number of ftrace_ops registered and tracing functions
156 int ftrace_nr_registered_ops(void)
158 struct ftrace_ops *ops;
159 int cnt = 0;
161 mutex_lock(&ftrace_lock);
163 for (ops = ftrace_ops_list;
164 ops != &ftrace_list_end; ops = ops->next)
165 cnt++;
167 mutex_unlock(&ftrace_lock);
169 return cnt;
172 static void
173 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
174 struct ftrace_ops *op, struct pt_regs *regs)
176 int bit;
178 bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
179 if (bit < 0)
180 return;
182 do_for_each_ftrace_op(op, ftrace_global_list) {
183 op->func(ip, parent_ip, op, regs);
184 } while_for_each_ftrace_op(op);
186 trace_clear_recursion(bit);
189 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
190 struct ftrace_ops *op, struct pt_regs *regs)
192 if (!test_tsk_trace_trace(current))
193 return;
195 ftrace_pid_function(ip, parent_ip, op, regs);
198 static void set_ftrace_pid_function(ftrace_func_t func)
200 /* do not set ftrace_pid_function to itself! */
201 if (func != ftrace_pid_func)
202 ftrace_pid_function = func;
206 * clear_ftrace_function - reset the ftrace function
208 * This NULLs the ftrace function and in essence stops
209 * tracing. There may be a lag before the change takes effect.
211 void clear_ftrace_function(void)
213 ftrace_trace_function = ftrace_stub;
214 ftrace_pid_function = ftrace_stub;
217 static void control_ops_disable_all(struct ftrace_ops *ops)
219 int cpu;
221 for_each_possible_cpu(cpu)
222 *per_cpu_ptr(ops->disabled, cpu) = 1;
225 static int control_ops_alloc(struct ftrace_ops *ops)
227 int __percpu *disabled;
229 disabled = alloc_percpu(int);
230 if (!disabled)
231 return -ENOMEM;
233 ops->disabled = disabled;
234 control_ops_disable_all(ops);
235 return 0;
238 static void control_ops_free(struct ftrace_ops *ops)
240 free_percpu(ops->disabled);
243 static void update_global_ops(void)
245 ftrace_func_t func;
248 * If there's only one function registered, then call that
249 * function directly. Otherwise, we need to iterate over the
250 * registered callers.
252 if (ftrace_global_list == &ftrace_list_end ||
253 ftrace_global_list->next == &ftrace_list_end) {
254 func = ftrace_global_list->func;
256 * As we are calling the function directly,
257 * if it does not have recursion protection,
258 * the function_trace_op needs to be updated
259 * accordingly.
261 if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
262 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
263 else
264 global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
265 } else {
266 func = ftrace_global_list_func;
267 /* The list has its own recursion protection. */
268 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
272 /* If we filter on pids, update to use the pid function */
273 if (!list_empty(&ftrace_pids)) {
274 set_ftrace_pid_function(func);
275 func = ftrace_pid_func;
278 global_ops.func = func;
281 static void update_ftrace_function(void)
283 ftrace_func_t func;
285 update_global_ops();
288 * If we are at the end of the list and this ops is
289 * recursion safe and not dynamic and the arch supports passing ops,
290 * then have the mcount trampoline call the function directly.
292 if (ftrace_ops_list == &ftrace_list_end ||
293 (ftrace_ops_list->next == &ftrace_list_end &&
294 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
295 (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
296 !FTRACE_FORCE_LIST_FUNC)) {
297 /* Set the ftrace_ops that the arch callback uses */
298 if (ftrace_ops_list == &global_ops)
299 function_trace_op = ftrace_global_list;
300 else
301 function_trace_op = ftrace_ops_list;
302 func = ftrace_ops_list->func;
303 } else {
304 /* Just use the default ftrace_ops */
305 function_trace_op = &ftrace_list_end;
306 func = ftrace_ops_list_func;
309 ftrace_trace_function = func;
312 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
314 ops->next = *list;
316 * We are entering ops into the list but another
317 * CPU might be walking that list. We need to make sure
318 * the ops->next pointer is valid before another CPU sees
319 * the ops pointer included into the list.
321 rcu_assign_pointer(*list, ops);
324 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
326 struct ftrace_ops **p;
329 * If we are removing the last function, then simply point
330 * to the ftrace_stub.
332 if (*list == ops && ops->next == &ftrace_list_end) {
333 *list = &ftrace_list_end;
334 return 0;
337 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
338 if (*p == ops)
339 break;
341 if (*p != ops)
342 return -1;
344 *p = (*p)->next;
345 return 0;
348 static void add_ftrace_list_ops(struct ftrace_ops **list,
349 struct ftrace_ops *main_ops,
350 struct ftrace_ops *ops)
352 int first = *list == &ftrace_list_end;
353 add_ftrace_ops(list, ops);
354 if (first)
355 add_ftrace_ops(&ftrace_ops_list, main_ops);
358 static int remove_ftrace_list_ops(struct ftrace_ops **list,
359 struct ftrace_ops *main_ops,
360 struct ftrace_ops *ops)
362 int ret = remove_ftrace_ops(list, ops);
363 if (!ret && *list == &ftrace_list_end)
364 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
365 return ret;
368 static int __register_ftrace_function(struct ftrace_ops *ops)
370 if (FTRACE_WARN_ON(ops == &global_ops))
371 return -EINVAL;
373 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
374 return -EBUSY;
376 /* We don't support both control and global flags set. */
377 if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
378 return -EINVAL;
380 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
382 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
383 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
384 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
386 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
387 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
388 return -EINVAL;
390 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
391 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
392 #endif
394 if (!core_kernel_data((unsigned long)ops))
395 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
397 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
398 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
399 ops->flags |= FTRACE_OPS_FL_ENABLED;
400 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
401 if (control_ops_alloc(ops))
402 return -ENOMEM;
403 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
404 } else
405 add_ftrace_ops(&ftrace_ops_list, ops);
407 if (ftrace_enabled)
408 update_ftrace_function();
410 return 0;
413 static void ftrace_sync(struct work_struct *work)
416 * This function is just a stub to implement a hard force
417 * of synchronize_sched(). This requires synchronizing
418 * tasks even in userspace and idle.
420 * Yes, function tracing is rude.
424 static int __unregister_ftrace_function(struct ftrace_ops *ops)
426 int ret;
428 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
429 return -EBUSY;
431 if (FTRACE_WARN_ON(ops == &global_ops))
432 return -EINVAL;
434 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
435 ret = remove_ftrace_list_ops(&ftrace_global_list,
436 &global_ops, ops);
437 if (!ret)
438 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
439 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
440 ret = remove_ftrace_list_ops(&ftrace_control_list,
441 &control_ops, ops);
442 if (!ret) {
444 * The ftrace_ops is now removed from the list,
445 * so there'll be no new users. We must ensure
446 * all current users are done before we free
447 * the control data.
448 * Note synchronize_sched() is not enough, as we
449 * use preempt_disable() to do RCU, but the function
450 * tracer can be called where RCU is not active
451 * (before user_exit()).
453 schedule_on_each_cpu(ftrace_sync);
454 control_ops_free(ops);
456 } else
457 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
459 if (ret < 0)
460 return ret;
462 if (ftrace_enabled)
463 update_ftrace_function();
466 * Dynamic ops may be freed, we must make sure that all
467 * callers are done before leaving this function.
469 * Again, normal synchronize_sched() is not good enough.
470 * We need to do a hard force of sched synchronization.
472 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
473 schedule_on_each_cpu(ftrace_sync);
476 return 0;
479 static void ftrace_update_pid_func(void)
481 /* Only do something if we are tracing something */
482 if (ftrace_trace_function == ftrace_stub)
483 return;
485 update_ftrace_function();
488 #ifdef CONFIG_FUNCTION_PROFILER
489 struct ftrace_profile {
490 struct hlist_node node;
491 unsigned long ip;
492 unsigned long counter;
493 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
494 unsigned long long time;
495 unsigned long long time_squared;
496 #endif
499 struct ftrace_profile_page {
500 struct ftrace_profile_page *next;
501 unsigned long index;
502 struct ftrace_profile records[];
505 struct ftrace_profile_stat {
506 atomic_t disabled;
507 struct hlist_head *hash;
508 struct ftrace_profile_page *pages;
509 struct ftrace_profile_page *start;
510 struct tracer_stat stat;
513 #define PROFILE_RECORDS_SIZE \
514 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
516 #define PROFILES_PER_PAGE \
517 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
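/*
 * Illustrative sizing only (actual values depend on arch and config):
 * on a 64-bit build with 4096-byte pages and the graph tracer enabled,
 * struct ftrace_profile is 48 bytes and the page header is 16 bytes,
 * so PROFILES_PER_PAGE works out to (4096 - 16) / 48 = 85 records.
 */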
519 static int ftrace_profile_enabled __read_mostly;
521 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
522 static DEFINE_MUTEX(ftrace_profile_lock);
524 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
526 #define FTRACE_PROFILE_HASH_BITS 10
527 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
529 static void *
530 function_stat_next(void *v, int idx)
532 struct ftrace_profile *rec = v;
533 struct ftrace_profile_page *pg;
535 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
537 again:
538 if (idx != 0)
539 rec++;
541 if ((void *)rec >= (void *)&pg->records[pg->index]) {
542 pg = pg->next;
543 if (!pg)
544 return NULL;
545 rec = &pg->records[0];
546 if (!rec->counter)
547 goto again;
550 return rec;
553 static void *function_stat_start(struct tracer_stat *trace)
555 struct ftrace_profile_stat *stat =
556 container_of(trace, struct ftrace_profile_stat, stat);
558 if (!stat || !stat->start)
559 return NULL;
561 return function_stat_next(&stat->start->records[0], 0);
564 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
565 /* function graph compares on total time */
566 static int function_stat_cmp(void *p1, void *p2)
568 struct ftrace_profile *a = p1;
569 struct ftrace_profile *b = p2;
571 if (a->time < b->time)
572 return -1;
573 if (a->time > b->time)
574 return 1;
575 else
576 return 0;
578 #else
579 /* not function graph compares against hits */
580 static int function_stat_cmp(void *p1, void *p2)
582 struct ftrace_profile *a = p1;
583 struct ftrace_profile *b = p2;
585 if (a->counter < b->counter)
586 return -1;
587 if (a->counter > b->counter)
588 return 1;
589 else
590 return 0;
592 #endif
594 static int function_stat_headers(struct seq_file *m)
596 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
597 seq_printf(m, " Function "
598 "Hit Time Avg s^2\n"
599 " -------- "
600 "--- ---- --- ---\n");
601 #else
602 seq_printf(m, " Function Hit\n"
603 " -------- ---\n");
604 #endif
605 return 0;
608 static int function_stat_show(struct seq_file *m, void *v)
610 struct ftrace_profile *rec = v;
611 char str[KSYM_SYMBOL_LEN];
612 int ret = 0;
613 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
614 static struct trace_seq s;
615 unsigned long long avg;
616 unsigned long long stddev;
617 #endif
618 mutex_lock(&ftrace_profile_lock);
620 /* we raced with function_profile_reset() */
621 if (unlikely(rec->counter == 0)) {
622 ret = -EBUSY;
623 goto out;
626 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
627 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
629 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
630 seq_printf(m, " ");
631 avg = rec->time;
632 do_div(avg, rec->counter);
634 /* Sample variance (s^2) */
635 if (rec->counter <= 1)
636 stddev = 0;
637 else {
639 * Apply Welford's method:
640 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
642 stddev = rec->counter * rec->time_squared -
643 rec->time * rec->time;
646 * Divide by only 1000 for the ns^2 -> us^2 conversion.
647 * trace_print_graph_duration will divide 1000 again.
649 do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
652 trace_seq_init(&s);
653 trace_print_graph_duration(rec->time, &s);
654 trace_seq_puts(&s, " ");
655 trace_print_graph_duration(avg, &s);
656 trace_seq_puts(&s, " ");
657 trace_print_graph_duration(stddev, &s);
658 trace_print_seq(m, &s);
659 #endif
660 seq_putc(m, '\n');
661 out:
662 mutex_unlock(&ftrace_profile_lock);
664 return ret;
667 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
669 struct ftrace_profile_page *pg;
671 pg = stat->pages = stat->start;
673 while (pg) {
674 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
675 pg->index = 0;
676 pg = pg->next;
679 memset(stat->hash, 0,
680 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
683 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
685 struct ftrace_profile_page *pg;
686 int functions;
687 int pages;
688 int i;
690 /* If we already allocated, do nothing */
691 if (stat->pages)
692 return 0;
694 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
695 if (!stat->pages)
696 return -ENOMEM;
698 #ifdef CONFIG_DYNAMIC_FTRACE
699 functions = ftrace_update_tot_cnt;
700 #else
702 * We do not know the number of functions that exist because
703 * dynamic tracing is what counts them. With past experience
704 * we have around 20K functions. That should be more than enough.
705 * It is highly unlikely we will execute every function in
706 * the kernel.
708 functions = 20000;
709 #endif
711 pg = stat->start = stat->pages;
713 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
715 for (i = 1; i < pages; i++) {
716 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
717 if (!pg->next)
718 goto out_free;
719 pg = pg->next;
722 return 0;
724 out_free:
725 pg = stat->start;
726 while (pg) {
727 unsigned long tmp = (unsigned long)pg;
729 pg = pg->next;
730 free_page(tmp);
733 stat->pages = NULL;
734 stat->start = NULL;
736 return -ENOMEM;
739 static int ftrace_profile_init_cpu(int cpu)
741 struct ftrace_profile_stat *stat;
742 int size;
744 stat = &per_cpu(ftrace_profile_stats, cpu);
746 if (stat->hash) {
747 /* If the profile is already created, simply reset it */
748 ftrace_profile_reset(stat);
749 return 0;
753 * We are profiling all functions, but usually only a few thousand
754 * functions are hit. We'll make a hash of 1024 items.
756 size = FTRACE_PROFILE_HASH_SIZE;
758 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
760 if (!stat->hash)
761 return -ENOMEM;
763 /* Preallocate the function profiling pages */
764 if (ftrace_profile_pages_init(stat) < 0) {
765 kfree(stat->hash);
766 stat->hash = NULL;
767 return -ENOMEM;
770 return 0;
773 static int ftrace_profile_init(void)
775 int cpu;
776 int ret = 0;
778 for_each_online_cpu(cpu) {
779 ret = ftrace_profile_init_cpu(cpu);
780 if (ret)
781 break;
784 return ret;
787 /* interrupts must be disabled */
788 static struct ftrace_profile *
789 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
791 struct ftrace_profile *rec;
792 struct hlist_head *hhd;
793 unsigned long key;
795 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
796 hhd = &stat->hash[key];
798 if (hlist_empty(hhd))
799 return NULL;
801 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
802 if (rec->ip == ip)
803 return rec;
806 return NULL;
809 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
810 struct ftrace_profile *rec)
812 unsigned long key;
814 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
815 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
819 * The memory is already allocated, this simply finds a new record to use.
821 static struct ftrace_profile *
822 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
824 struct ftrace_profile *rec = NULL;
826 /* prevent recursion (from NMIs) */
827 if (atomic_inc_return(&stat->disabled) != 1)
828 goto out;
831 * Try to find the function again since an NMI
832 * could have added it
834 rec = ftrace_find_profiled_func(stat, ip);
835 if (rec)
836 goto out;
838 if (stat->pages->index == PROFILES_PER_PAGE) {
839 if (!stat->pages->next)
840 goto out;
841 stat->pages = stat->pages->next;
844 rec = &stat->pages->records[stat->pages->index++];
845 rec->ip = ip;
846 ftrace_add_profile(stat, rec);
848 out:
849 atomic_dec(&stat->disabled);
851 return rec;
854 static void
855 function_profile_call(unsigned long ip, unsigned long parent_ip,
856 struct ftrace_ops *ops, struct pt_regs *regs)
858 struct ftrace_profile_stat *stat;
859 struct ftrace_profile *rec;
860 unsigned long flags;
862 if (!ftrace_profile_enabled)
863 return;
865 local_irq_save(flags);
867 stat = &__get_cpu_var(ftrace_profile_stats);
868 if (!stat->hash || !ftrace_profile_enabled)
869 goto out;
871 rec = ftrace_find_profiled_func(stat, ip);
872 if (!rec) {
873 rec = ftrace_profile_alloc(stat, ip);
874 if (!rec)
875 goto out;
878 rec->counter++;
879 out:
880 local_irq_restore(flags);
883 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
884 static int profile_graph_entry(struct ftrace_graph_ent *trace)
886 function_profile_call(trace->func, 0, NULL, NULL);
887 return 1;
890 static void profile_graph_return(struct ftrace_graph_ret *trace)
892 struct ftrace_profile_stat *stat;
893 unsigned long long calltime;
894 struct ftrace_profile *rec;
895 unsigned long flags;
897 local_irq_save(flags);
898 stat = &__get_cpu_var(ftrace_profile_stats);
899 if (!stat->hash || !ftrace_profile_enabled)
900 goto out;
902 /* If the calltime was zero'd ignore it */
903 if (!trace->calltime)
904 goto out;
906 calltime = trace->rettime - trace->calltime;
908 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
909 int index;
911 index = trace->depth;
913 /* Append this call time to the parent time to subtract */
914 if (index)
915 current->ret_stack[index - 1].subtime += calltime;
917 if (current->ret_stack[index].subtime < calltime)
918 calltime -= current->ret_stack[index].subtime;
919 else
920 calltime = 0;
923 rec = ftrace_find_profiled_func(stat, trace->func);
924 if (rec) {
925 rec->time += calltime;
926 rec->time_squared += calltime * calltime;
929 out:
930 local_irq_restore(flags);
933 static int register_ftrace_profiler(void)
935 return register_ftrace_graph(&profile_graph_return,
936 &profile_graph_entry);
939 static void unregister_ftrace_profiler(void)
941 unregister_ftrace_graph();
943 #else
944 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
945 .func = function_profile_call,
946 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
947 INIT_REGEX_LOCK(ftrace_profile_ops)
950 static int register_ftrace_profiler(void)
952 return register_ftrace_function(&ftrace_profile_ops);
955 static void unregister_ftrace_profiler(void)
957 unregister_ftrace_function(&ftrace_profile_ops);
959 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
961 static ssize_t
962 ftrace_profile_write(struct file *filp, const char __user *ubuf,
963 size_t cnt, loff_t *ppos)
965 unsigned long val;
966 int ret;
968 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
969 if (ret)
970 return ret;
972 val = !!val;
974 mutex_lock(&ftrace_profile_lock);
975 if (ftrace_profile_enabled ^ val) {
976 if (val) {
977 ret = ftrace_profile_init();
978 if (ret < 0) {
979 cnt = ret;
980 goto out;
983 ret = register_ftrace_profiler();
984 if (ret < 0) {
985 cnt = ret;
986 goto out;
988 ftrace_profile_enabled = 1;
989 } else {
990 ftrace_profile_enabled = 0;
992 * unregister_ftrace_profiler calls stop_machine
993 * so this acts like a synchronize_sched().
995 unregister_ftrace_profiler();
998 out:
999 mutex_unlock(&ftrace_profile_lock);
1001 *ppos += cnt;
1003 return cnt;
1006 static ssize_t
1007 ftrace_profile_read(struct file *filp, char __user *ubuf,
1008 size_t cnt, loff_t *ppos)
1010 char buf[64]; /* big enough to hold a number */
1011 int r;
1013 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
1014 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1017 static const struct file_operations ftrace_profile_fops = {
1018 .open = tracing_open_generic,
1019 .read = ftrace_profile_read,
1020 .write = ftrace_profile_write,
1021 .llseek = default_llseek,
1024 /* used to initialize the real stat files */
1025 static struct tracer_stat function_stats __initdata = {
1026 .name = "functions",
1027 .stat_start = function_stat_start,
1028 .stat_next = function_stat_next,
1029 .stat_cmp = function_stat_cmp,
1030 .stat_headers = function_stat_headers,
1031 .stat_show = function_stat_show
1034 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1036 struct ftrace_profile_stat *stat;
1037 struct dentry *entry;
1038 char *name;
1039 int ret;
1040 int cpu;
1042 for_each_possible_cpu(cpu) {
1043 stat = &per_cpu(ftrace_profile_stats, cpu);
1045 /* allocate enough for function name + cpu number */
1046 name = kmalloc(32, GFP_KERNEL);
1047 if (!name) {
1049 * The files created are permanent, if something happens
1050 * we still do not free memory.
1052 WARN(1,
1053 "Could not allocate stat file for cpu %d\n",
1054 cpu);
1055 return;
1057 stat->stat = function_stats;
1058 snprintf(name, 32, "function%d", cpu);
1059 stat->stat.name = name;
1060 ret = register_stat_tracer(&stat->stat);
1061 if (ret) {
1062 WARN(1,
1063 "Could not register function stat for cpu %d\n",
1064 cpu);
1065 kfree(name);
1066 return;
1070 entry = debugfs_create_file("function_profile_enabled", 0644,
1071 d_tracer, NULL, &ftrace_profile_fops);
1072 if (!entry)
1073 pr_warning("Could not create debugfs "
1074 "'function_profile_enabled' entry\n");
1077 #else /* CONFIG_FUNCTION_PROFILER */
1078 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1081 #endif /* CONFIG_FUNCTION_PROFILER */
1083 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1085 loff_t
1086 ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
1088 loff_t ret;
1090 if (file->f_mode & FMODE_READ)
1091 ret = seq_lseek(file, offset, whence);
1092 else
1093 file->f_pos = ret = 1;
1095 return ret;
1098 #ifdef CONFIG_DYNAMIC_FTRACE
1100 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1101 # error Dynamic ftrace depends on MCOUNT_RECORD
1102 #endif
1104 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1106 struct ftrace_func_probe {
1107 struct hlist_node node;
1108 struct ftrace_probe_ops *ops;
1109 unsigned long flags;
1110 unsigned long ip;
1111 void *data;
1112 struct list_head free_list;
1115 struct ftrace_func_entry {
1116 struct hlist_node hlist;
1117 unsigned long ip;
1120 struct ftrace_hash {
1121 unsigned long size_bits;
1122 struct hlist_head *buckets;
1123 unsigned long count;
1124 struct rcu_head rcu;
1128 * We make these constant because no one should touch them,
1129 * but they are used as the default "empty hash", to avoid allocating
1130 * it all the time. These are in a read only section such that if
1131 * anyone does try to modify it, it will cause an exception.
1133 static const struct hlist_head empty_buckets[1];
1134 static const struct ftrace_hash empty_hash = {
1135 .buckets = (struct hlist_head *)empty_buckets,
1137 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1139 static struct ftrace_ops global_ops = {
1140 .func = ftrace_stub,
1141 .notrace_hash = EMPTY_HASH,
1142 .filter_hash = EMPTY_HASH,
1143 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
1144 INIT_REGEX_LOCK(global_ops)
1147 struct ftrace_page {
1148 struct ftrace_page *next;
1149 struct dyn_ftrace *records;
1150 int index;
1151 int size;
1154 static struct ftrace_page *ftrace_new_pgs;
1156 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1157 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
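/*
 * Illustrative sizing only: assuming a two-word struct dyn_ftrace
 * (ip + flags, 16 bytes on 64-bit; arch-specific fields may add more),
 * a 4096-byte page holds about 256 records.
 */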
1159 /* estimate from running different kernels */
1160 #define NR_TO_INIT 10000
1162 static struct ftrace_page *ftrace_pages_start;
1163 static struct ftrace_page *ftrace_pages;
1165 static bool ftrace_hash_empty(struct ftrace_hash *hash)
1167 return !hash || !hash->count;
1170 static struct ftrace_func_entry *
1171 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1173 unsigned long key;
1174 struct ftrace_func_entry *entry;
1175 struct hlist_head *hhd;
1177 if (ftrace_hash_empty(hash))
1178 return NULL;
1180 if (hash->size_bits > 0)
1181 key = hash_long(ip, hash->size_bits);
1182 else
1183 key = 0;
1185 hhd = &hash->buckets[key];
1187 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1188 if (entry->ip == ip)
1189 return entry;
1191 return NULL;
1194 static void __add_hash_entry(struct ftrace_hash *hash,
1195 struct ftrace_func_entry *entry)
1197 struct hlist_head *hhd;
1198 unsigned long key;
1200 if (hash->size_bits)
1201 key = hash_long(entry->ip, hash->size_bits);
1202 else
1203 key = 0;
1205 hhd = &hash->buckets[key];
1206 hlist_add_head(&entry->hlist, hhd);
1207 hash->count++;
1210 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1212 struct ftrace_func_entry *entry;
1214 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1215 if (!entry)
1216 return -ENOMEM;
1218 entry->ip = ip;
1219 __add_hash_entry(hash, entry);
1221 return 0;
1224 static void
1225 free_hash_entry(struct ftrace_hash *hash,
1226 struct ftrace_func_entry *entry)
1228 hlist_del(&entry->hlist);
1229 kfree(entry);
1230 hash->count--;
1233 static void
1234 remove_hash_entry(struct ftrace_hash *hash,
1235 struct ftrace_func_entry *entry)
1237 hlist_del(&entry->hlist);
1238 hash->count--;
1241 static void ftrace_hash_clear(struct ftrace_hash *hash)
1243 struct hlist_head *hhd;
1244 struct hlist_node *tn;
1245 struct ftrace_func_entry *entry;
1246 int size = 1 << hash->size_bits;
1247 int i;
1249 if (!hash->count)
1250 return;
1252 for (i = 0; i < size; i++) {
1253 hhd = &hash->buckets[i];
1254 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1255 free_hash_entry(hash, entry);
1257 FTRACE_WARN_ON(hash->count);
1260 static void free_ftrace_hash(struct ftrace_hash *hash)
1262 if (!hash || hash == EMPTY_HASH)
1263 return;
1264 ftrace_hash_clear(hash);
1265 kfree(hash->buckets);
1266 kfree(hash);
1269 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1271 struct ftrace_hash *hash;
1273 hash = container_of(rcu, struct ftrace_hash, rcu);
1274 free_ftrace_hash(hash);
1277 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1279 if (!hash || hash == EMPTY_HASH)
1280 return;
1281 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1284 void ftrace_free_filter(struct ftrace_ops *ops)
1286 ftrace_ops_init(ops);
1287 free_ftrace_hash(ops->filter_hash);
1288 free_ftrace_hash(ops->notrace_hash);
1291 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1293 struct ftrace_hash *hash;
1294 int size;
1296 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1297 if (!hash)
1298 return NULL;
1300 size = 1 << size_bits;
1301 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1303 if (!hash->buckets) {
1304 kfree(hash);
1305 return NULL;
1308 hash->size_bits = size_bits;
1310 return hash;
1313 static struct ftrace_hash *
1314 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1316 struct ftrace_func_entry *entry;
1317 struct ftrace_hash *new_hash;
1318 int size;
1319 int ret;
1320 int i;
1322 new_hash = alloc_ftrace_hash(size_bits);
1323 if (!new_hash)
1324 return NULL;
1326 /* Empty hash? */
1327 if (ftrace_hash_empty(hash))
1328 return new_hash;
1330 size = 1 << hash->size_bits;
1331 for (i = 0; i < size; i++) {
1332 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1333 ret = add_hash_entry(new_hash, entry->ip);
1334 if (ret < 0)
1335 goto free_hash;
1339 FTRACE_WARN_ON(new_hash->count != hash->count);
1341 return new_hash;
1343 free_hash:
1344 free_ftrace_hash(new_hash);
1345 return NULL;
1348 static void
1349 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1350 static void
1351 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1353 static int
1354 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1355 struct ftrace_hash **dst, struct ftrace_hash *src)
1357 struct ftrace_func_entry *entry;
1358 struct hlist_node *tn;
1359 struct hlist_head *hhd;
1360 struct ftrace_hash *old_hash;
1361 struct ftrace_hash *new_hash;
1362 int size = src->count;
1363 int bits = 0;
1364 int ret;
1365 int i;
1368 * Remove the current set, update the hash and add
1369 * them back.
1371 ftrace_hash_rec_disable(ops, enable);
1374 * If the new source is empty, just free dst and assign it
1375 * the empty_hash.
1377 if (!src->count) {
1378 free_ftrace_hash_rcu(*dst);
1379 rcu_assign_pointer(*dst, EMPTY_HASH);
1380 /* still need to update the function records */
1381 ret = 0;
1382 goto out;
1386 * Make the hash size about 1/2 the # found
1388 for (size /= 2; size; size >>= 1)
1389 bits++;
1391 /* Don't allocate too much */
1392 if (bits > FTRACE_HASH_MAX_BITS)
1393 bits = FTRACE_HASH_MAX_BITS;
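/*
 * Worked example (illustrative): for src->count == 50 the loop above
 * sees size = 25, 12, 6, 3, 1 and yields bits = 5, i.e. a 32 bucket
 * hash for 50 entries, roughly the 1/2 ratio asked for above.
 */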
1395 ret = -ENOMEM;
1396 new_hash = alloc_ftrace_hash(bits);
1397 if (!new_hash)
1398 goto out;
1400 size = 1 << src->size_bits;
1401 for (i = 0; i < size; i++) {
1402 hhd = &src->buckets[i];
1403 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1404 remove_hash_entry(src, entry);
1405 __add_hash_entry(new_hash, entry);
1409 old_hash = *dst;
1410 rcu_assign_pointer(*dst, new_hash);
1411 free_ftrace_hash_rcu(old_hash);
1413 ret = 0;
1414 out:
1416 * Enable regardless of ret:
1417 * On success, we enable the new hash.
1418 * On failure, we re-enable the original hash.
1420 ftrace_hash_rec_enable(ops, enable);
1422 return ret;
1426 * Test the hashes for this ops to see if we want to call
1427 * the ops->func or not.
1429 * It's a match if the ip is in the ops->filter_hash or
1430 * the filter_hash does not exist or is empty,
1431 * AND
1432 * the ip is not in the ops->notrace_hash.
1434 * This needs to be called with preemption disabled as
1435 * the hashes are freed with call_rcu_sched().
1437 static int
1438 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1440 struct ftrace_hash *filter_hash;
1441 struct ftrace_hash *notrace_hash;
1442 int ret;
1444 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1446 * There's a small race when adding ops that the ftrace handler
1447 * that wants regs, may be called without them. We can not
1448 * allow that handler to be called if regs is NULL.
1450 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1451 return 0;
1452 #endif
1454 filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1455 notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
1457 if ((ftrace_hash_empty(filter_hash) ||
1458 ftrace_lookup_ip(filter_hash, ip)) &&
1459 (ftrace_hash_empty(notrace_hash) ||
1460 !ftrace_lookup_ip(notrace_hash, ip)))
1461 ret = 1;
1462 else
1463 ret = 0;
1465 return ret;
1469 * This is a double for loop. Do not use 'break' to break out of the loop,
1470 * you must use a goto.
1472 #define do_for_each_ftrace_rec(pg, rec) \
1473 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1474 int _____i; \
1475 for (_____i = 0; _____i < pg->index; _____i++) { \
1476 rec = &pg->records[_____i];
1478 #define while_for_each_ftrace_rec() \
1483 static int ftrace_cmp_recs(const void *a, const void *b)
1485 const struct dyn_ftrace *key = a;
1486 const struct dyn_ftrace *rec = b;
1488 if (key->flags < rec->ip)
1489 return -1;
1490 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1491 return 1;
1492 return 0;
1495 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1497 struct ftrace_page *pg;
1498 struct dyn_ftrace *rec;
1499 struct dyn_ftrace key;
1501 key.ip = start;
1502 key.flags = end; /* overload flags, as it is unsigned long */
1504 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1505 if (end < pg->records[0].ip ||
1506 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1507 continue;
1508 rec = bsearch(&key, pg->records, pg->index,
1509 sizeof(struct dyn_ftrace),
1510 ftrace_cmp_recs);
1511 if (rec)
1512 return rec->ip;
1515 return 0;
1519 * ftrace_location - return true if the ip given is a traced location
1520 * @ip: the instruction pointer to check
1522 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1523 * That is, the instruction that is either a NOP or call to
1524 * the function tracer. It checks the ftrace internal tables to
1525 * determine if the address belongs or not.
1527 unsigned long ftrace_location(unsigned long ip)
1529 return ftrace_location_range(ip, ip);
1530 }
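/*
 * Illustrative use (hypothetical caller, not quoted from this file):
 * code that patches kernel text can refuse addresses that ftrace
 * already manages:
 *
 *	if (ftrace_location(addr))
 *		return -EBUSY;
 */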
1533 * ftrace_text_reserved - return true if range contains an ftrace location
1534 * @start: start of range to search
1535 * @end: end of range to search (inclusive). @end points to the last byte to check.
1537 * Returns 1 if the range from @start to @end contains an ftrace location.
1538 * That is, the instruction that is either a NOP or call to
1539 * the function tracer. It checks the ftrace internal tables to
1540 * determine if the address belongs or not.
1542 int ftrace_text_reserved(void *start, void *end)
1544 unsigned long ret;
1546 ret = ftrace_location_range((unsigned long)start,
1547 (unsigned long)end);
1549 return (int)!!ret;
1550 }
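/*
 * Illustrative use (hypothetical caller): before writing over a byte
 * range, check that it does not overlap an ftrace call site; @end is
 * inclusive, so a range of 'size' bytes starting at 'p' would be:
 *
 *	if (ftrace_text_reserved(p, p + size - 1))
 *		return -EBUSY;
 */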
1552 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1553 int filter_hash,
1554 bool inc)
1556 struct ftrace_hash *hash;
1557 struct ftrace_hash *other_hash;
1558 struct ftrace_page *pg;
1559 struct dyn_ftrace *rec;
1560 int count = 0;
1561 int all = 0;
1563 /* Only update if the ops has been registered */
1564 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1565 return;
1568 * In the filter_hash case:
1569 * If the count is zero, we update all records.
1570 * Otherwise we just update the items in the hash.
1572 * In the notrace_hash case:
1573 * We enable the update in the hash.
1574 * As disabling notrace means enabling the tracing,
1575 * and enabling notrace means disabling, the inc variable
1576 * is inverted.
1578 if (filter_hash) {
1579 hash = ops->filter_hash;
1580 other_hash = ops->notrace_hash;
1581 if (ftrace_hash_empty(hash))
1582 all = 1;
1583 } else {
1584 inc = !inc;
1585 hash = ops->notrace_hash;
1586 other_hash = ops->filter_hash;
1588 * If the notrace hash has no items,
1589 * then there's nothing to do.
1591 if (ftrace_hash_empty(hash))
1592 return;
1595 do_for_each_ftrace_rec(pg, rec) {
1596 int in_other_hash = 0;
1597 int in_hash = 0;
1598 int match = 0;
1600 if (all) {
1602 * Only the filter_hash affects all records.
1603 * Update if the record is not in the notrace hash.
1605 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1606 match = 1;
1607 } else {
1608 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1609 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1614 if (filter_hash && in_hash && !in_other_hash)
1615 match = 1;
1616 else if (!filter_hash && in_hash &&
1617 (in_other_hash || ftrace_hash_empty(other_hash)))
1618 match = 1;
1620 if (!match)
1621 continue;
1623 if (inc) {
1624 rec->flags++;
1625 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1626 return;
1628 * If any ops wants regs saved for this function
1629 * then all ops will get saved regs.
1631 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1632 rec->flags |= FTRACE_FL_REGS;
1633 } else {
1634 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1635 return;
1636 rec->flags--;
1638 count++;
1639 /* Shortcut, if we handled all records, we are done. */
1640 if (!all && count == hash->count)
1641 return;
1642 } while_for_each_ftrace_rec();
1645 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1646 int filter_hash)
1648 __ftrace_hash_rec_update(ops, filter_hash, 0);
1651 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1652 int filter_hash)
1654 __ftrace_hash_rec_update(ops, filter_hash, 1);
1657 static void print_ip_ins(const char *fmt, unsigned char *p)
1659 int i;
1661 printk(KERN_CONT "%s", fmt);
1663 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1664 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1668 * ftrace_bug - report and shutdown function tracer
1669 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1670 * @ip: The address that failed
1672 * The arch code that enables or disables the function tracing
1673 * can call ftrace_bug() when it has detected a problem in
1674 * modifying the code. @failed should be one of either:
1675 * EFAULT - if the problem happens on reading the @ip address
1676 * EINVAL - if what is read at @ip is not what was expected
1677 * EPERM - if the problem happens on writing to the @ip address
1679 void ftrace_bug(int failed, unsigned long ip)
1681 switch (failed) {
1682 case -EFAULT:
1683 FTRACE_WARN_ON_ONCE(1);
1684 pr_info("ftrace faulted on modifying ");
1685 print_ip_sym(ip);
1686 break;
1687 case -EINVAL:
1688 FTRACE_WARN_ON_ONCE(1);
1689 pr_info("ftrace failed to modify ");
1690 print_ip_sym(ip);
1691 print_ip_ins(" actual: ", (unsigned char *)ip);
1692 printk(KERN_CONT "\n");
1693 break;
1694 case -EPERM:
1695 FTRACE_WARN_ON_ONCE(1);
1696 pr_info("ftrace faulted on writing ");
1697 print_ip_sym(ip);
1698 break;
1699 default:
1700 FTRACE_WARN_ON_ONCE(1);
1701 pr_info("ftrace faulted on unknown error ");
1702 print_ip_sym(ip);
1706 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1708 unsigned long flag = 0UL;
1711 * If we are updating calls:
1713 * If the record has a ref count, then we need to enable it
1714 * because someone is using it.
1716 * Otherwise we make sure its disabled.
1718 * If we are disabling calls, then disable all records that
1719 * are enabled.
1721 if (enable && (rec->flags & ~FTRACE_FL_MASK))
1722 flag = FTRACE_FL_ENABLED;
1725 * If enabling and the REGS flag does not match the REGS_EN, then
1726 * do not ignore this record. Set flags to fail the compare against
1727 * ENABLED.
1729 if (flag &&
1730 (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1731 flag |= FTRACE_FL_REGS;
1733 /* If the state of this record hasn't changed, then do nothing */
1734 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1735 return FTRACE_UPDATE_IGNORE;
1737 if (flag) {
1738 /* Save off if rec is being enabled (for return value) */
1739 flag ^= rec->flags & FTRACE_FL_ENABLED;
1741 if (update) {
1742 rec->flags |= FTRACE_FL_ENABLED;
1743 if (flag & FTRACE_FL_REGS) {
1744 if (rec->flags & FTRACE_FL_REGS)
1745 rec->flags |= FTRACE_FL_REGS_EN;
1746 else
1747 rec->flags &= ~FTRACE_FL_REGS_EN;
1752 * If this record is being updated from a nop, then
1753 * return UPDATE_MAKE_CALL.
1754 * Otherwise, if the EN flag is set, then return
1755 * UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1756 * from the non-save regs, to a save regs function.
1757 * Otherwise,
1758 * return UPDATE_MODIFY_CALL to tell the caller to convert
1759 * from the save regs, to a non-save regs function.
1761 if (flag & FTRACE_FL_ENABLED)
1762 return FTRACE_UPDATE_MAKE_CALL;
1763 else if (rec->flags & FTRACE_FL_REGS_EN)
1764 return FTRACE_UPDATE_MODIFY_CALL_REGS;
1765 else
1766 return FTRACE_UPDATE_MODIFY_CALL;
1769 if (update) {
1770 /* If there's no more users, clear all flags */
1771 if (!(rec->flags & ~FTRACE_FL_MASK))
1772 rec->flags = 0;
1773 else
1774 /* Just disable the record (keep REGS state) */
1775 rec->flags &= ~FTRACE_FL_ENABLED;
1778 return FTRACE_UPDATE_MAKE_NOP;
1782 * ftrace_update_record, set a record that now is tracing or not
1783 * @rec: the record to update
1784 * @enable: set to 1 if the record is tracing, zero to force disable
1786 * The records that represent all functions that can be traced need
1787 * to be updated when tracing has been enabled.
1789 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1791 return ftrace_check_record(rec, enable, 1);
1795 * ftrace_test_record, check if the record has been enabled or not
1796 * @rec: the record to test
1797 * @enable: set to 1 to check if enabled, 0 if it is disabled
1799 * The arch code may need to test if a record is already set to
1800 * tracing to determine how to modify the function code that it
1801 * represents.
1803 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1805 return ftrace_check_record(rec, enable, 0);
1808 static int
1809 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1811 unsigned long ftrace_old_addr;
1812 unsigned long ftrace_addr;
1813 int ret;
1815 ret = ftrace_update_record(rec, enable);
1817 if (rec->flags & FTRACE_FL_REGS)
1818 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1819 else
1820 ftrace_addr = (unsigned long)FTRACE_ADDR;
1822 switch (ret) {
1823 case FTRACE_UPDATE_IGNORE:
1824 return 0;
1826 case FTRACE_UPDATE_MAKE_CALL:
1827 return ftrace_make_call(rec, ftrace_addr);
1829 case FTRACE_UPDATE_MAKE_NOP:
1830 return ftrace_make_nop(NULL, rec, ftrace_addr);
1832 case FTRACE_UPDATE_MODIFY_CALL_REGS:
1833 case FTRACE_UPDATE_MODIFY_CALL:
1834 if (rec->flags & FTRACE_FL_REGS)
1835 ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1836 else
1837 ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1839 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1842 return -1; /* unknown ftrace bug */
1845 void __weak ftrace_replace_code(int enable)
1847 struct dyn_ftrace *rec;
1848 struct ftrace_page *pg;
1849 int failed;
1851 if (unlikely(ftrace_disabled))
1852 return;
1854 do_for_each_ftrace_rec(pg, rec) {
1855 failed = __ftrace_replace_code(rec, enable);
1856 if (failed) {
1857 ftrace_bug(failed, rec->ip);
1858 /* Stop processing */
1859 return;
1861 } while_for_each_ftrace_rec();
1864 struct ftrace_rec_iter {
1865 struct ftrace_page *pg;
1866 int index;
1870 * ftrace_rec_iter_start, start up iterating over traced functions
1872 * Returns an iterator handle that is used to iterate over all
1873 * the records that represent address locations where functions
1874 * are traced.
1876 * May return NULL if no records are available.
1878 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1881 * We only use a single iterator.
1882 * Protected by the ftrace_lock mutex.
1884 static struct ftrace_rec_iter ftrace_rec_iter;
1885 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1887 iter->pg = ftrace_pages_start;
1888 iter->index = 0;
1890 /* Could have empty pages */
1891 while (iter->pg && !iter->pg->index)
1892 iter->pg = iter->pg->next;
1894 if (!iter->pg)
1895 return NULL;
1897 return iter;
1901 * ftrace_rec_iter_next, get the next record to process.
1902 * @iter: The handle to the iterator.
1904 * Returns the next iterator after the given iterator @iter.
1906 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1908 iter->index++;
1910 if (iter->index >= iter->pg->index) {
1911 iter->pg = iter->pg->next;
1912 iter->index = 0;
1914 /* Could have empty pages */
1915 while (iter->pg && !iter->pg->index)
1916 iter->pg = iter->pg->next;
1919 if (!iter->pg)
1920 return NULL;
1922 return iter;
1926 * ftrace_rec_iter_record, get the record at the iterator location
1927 * @iter: The current iterator location
1929 * Returns the record that the current @iter is at.
1931 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1933 return &iter->pg->records[iter->index];
1934 }
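/*
 * Illustrative sketch of how an arch update routine might walk every
 * record with the iterator API above (hypothetical caller; the iterator
 * is protected by the ftrace_lock mutex, as noted above):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch the site at rec->ip ...
 *	}
 */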
1936 static int
1937 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1939 unsigned long ip;
1940 int ret;
1942 ip = rec->ip;
1944 if (unlikely(ftrace_disabled))
1945 return 0;
1947 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1948 if (ret) {
1949 ftrace_bug(ret, ip);
1950 return 0;
1952 return 1;
1956 * archs can override this function if they must do something
1957 * before the code modification is performed.
1959 int __weak ftrace_arch_code_modify_prepare(void)
1961 return 0;
1965 * archs can override this function if they must do something
1966 * after the code modification is performed.
1968 int __weak ftrace_arch_code_modify_post_process(void)
1970 return 0;
1973 void ftrace_modify_all_code(int command)
1975 int update = command & FTRACE_UPDATE_TRACE_FUNC;
1978 * If the ftrace_caller calls a ftrace_ops func directly,
1979 * we need to make sure that it only traces functions it
1980 * expects to trace. When doing the switch of functions,
1981 * we need to update to the ftrace_ops_list_func first
1982 * before the transition between old and new calls are set,
1983 * as the ftrace_ops_list_func will check the ops hashes
1984 * to make sure the ops are having the right functions
1985 * traced.
1987 if (update)
1988 ftrace_update_ftrace_func(ftrace_ops_list_func);
1990 if (command & FTRACE_UPDATE_CALLS)
1991 ftrace_replace_code(1);
1992 else if (command & FTRACE_DISABLE_CALLS)
1993 ftrace_replace_code(0);
1995 if (update && ftrace_trace_function != ftrace_ops_list_func)
1996 ftrace_update_ftrace_func(ftrace_trace_function);
1998 if (command & FTRACE_START_FUNC_RET)
1999 ftrace_enable_ftrace_graph_caller();
2000 else if (command & FTRACE_STOP_FUNC_RET)
2001 ftrace_disable_ftrace_graph_caller();
2004 static int __ftrace_modify_code(void *data)
2006 int *command = data;
2008 ftrace_modify_all_code(*command);
2010 return 0;
2014 * ftrace_run_stop_machine, go back to the stop machine method
2015 * @command: The command to tell ftrace what to do
2017 * If an arch needs to fall back to the stop machine method,
2018 * it can call this function.
2020 void ftrace_run_stop_machine(int command)
2022 stop_machine(__ftrace_modify_code, &command, NULL);
2026 * arch_ftrace_update_code, modify the code to trace or not trace
2027 * @command: The command that needs to be done
2029 * Archs can override this function if they do not need to
2030 * run stop_machine() to modify code.
2032 void __weak arch_ftrace_update_code(int command)
2034 ftrace_run_stop_machine(command);
2037 static void ftrace_run_update_code(int command)
2039 int ret;
2041 ret = ftrace_arch_code_modify_prepare();
2042 FTRACE_WARN_ON(ret);
2043 if (ret)
2044 return;
2046 * Do not call function tracer while we update the code.
2047 * We are in stop machine.
2049 function_trace_stop++;
2052 * By default we use stop_machine() to modify the code.
2053 * But archs can do whatever they want as long as it
2054 * is safe. The stop_machine() is the safest, but also
2055 * produces the most overhead.
2057 arch_ftrace_update_code(command);
2059 function_trace_stop--;
2061 ret = ftrace_arch_code_modify_post_process();
2062 FTRACE_WARN_ON(ret);
2065 static ftrace_func_t saved_ftrace_func;
2066 static int ftrace_start_up;
2067 static int global_start_up;
2069 static void ftrace_startup_enable(int command)
2071 if (saved_ftrace_func != ftrace_trace_function) {
2072 saved_ftrace_func = ftrace_trace_function;
2073 command |= FTRACE_UPDATE_TRACE_FUNC;
2076 if (!command || !ftrace_enabled)
2077 return;
2079 ftrace_run_update_code(command);
2082 static int ftrace_startup(struct ftrace_ops *ops, int command)
2084 bool hash_enable = true;
2085 int ret;
2087 if (unlikely(ftrace_disabled))
2088 return -ENODEV;
2090 ret = __register_ftrace_function(ops);
2091 if (ret)
2092 return ret;
2094 ftrace_start_up++;
2095 command |= FTRACE_UPDATE_CALLS;
2097 /* ops marked global share the filter hashes */
2098 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2099 ops = &global_ops;
2100 /* Don't update hash if global is already set */
2101 if (global_start_up)
2102 hash_enable = false;
2103 global_start_up++;
2106 ops->flags |= FTRACE_OPS_FL_ENABLED;
2107 if (hash_enable)
2108 ftrace_hash_rec_enable(ops, 1);
2110 ftrace_startup_enable(command);
2112 return 0;
2115 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2117 bool hash_disable = true;
2118 int ret;
2120 if (unlikely(ftrace_disabled))
2121 return -ENODEV;
2123 ret = __unregister_ftrace_function(ops);
2124 if (ret)
2125 return ret;
2127 ftrace_start_up--;
2129 * Just warn in case of unbalance, no need to kill ftrace, it's not
2130 * critical but the ftrace_call callers may never be nopped again after
2131 * further ftrace uses.
2133 WARN_ON_ONCE(ftrace_start_up < 0);
2135 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2136 ops = &global_ops;
2137 global_start_up--;
2138 WARN_ON_ONCE(global_start_up < 0);
2139 /* Don't update hash if global still has users */
2140 if (global_start_up) {
2141 WARN_ON_ONCE(!ftrace_start_up);
2142 hash_disable = false;
2146 if (hash_disable)
2147 ftrace_hash_rec_disable(ops, 1);
2149 if (ops != &global_ops || !global_start_up)
2150 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2152 command |= FTRACE_UPDATE_CALLS;
2154 if (saved_ftrace_func != ftrace_trace_function) {
2155 saved_ftrace_func = ftrace_trace_function;
2156 command |= FTRACE_UPDATE_TRACE_FUNC;
2159 if (!command || !ftrace_enabled)
2160 return 0;
2162 ftrace_run_update_code(command);
2163 return 0;
2166 static void ftrace_startup_sysctl(void)
2168 if (unlikely(ftrace_disabled))
2169 return;
2171 /* Force update next time */
2172 saved_ftrace_func = NULL;
2173 /* ftrace_start_up is true if we want ftrace running */
2174 if (ftrace_start_up)
2175 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2178 static void ftrace_shutdown_sysctl(void)
2180 if (unlikely(ftrace_disabled))
2181 return;
2183 /* ftrace_start_up is true if ftrace is running */
2184 if (ftrace_start_up)
2185 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2188 static cycle_t ftrace_update_time;
2189 static unsigned long ftrace_update_cnt;
2190 unsigned long ftrace_update_tot_cnt;
2192 static inline int ops_traces_mod(struct ftrace_ops *ops)
2195 * Filter_hash being empty will default to tracing the module.
2196 * But notrace hash requires a test of individual module functions.
2198 return ftrace_hash_empty(ops->filter_hash) &&
2199 ftrace_hash_empty(ops->notrace_hash);
2203 * Check if the current ops references the record.
2205 * If the ops traces all functions, then it was already accounted for.
2206 * If the ops does not trace the current record function, skip it.
2207 * If the ops ignores the function via notrace filter, skip it.
2209 static inline bool
2210 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2212 /* If ops isn't enabled, ignore it */
2213 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2214 return 0;
2216 /* If ops traces all mods, we already accounted for it */
2217 if (ops_traces_mod(ops))
2218 return 0;
2220 /* The function must be in the filter */
2221 if (!ftrace_hash_empty(ops->filter_hash) &&
2222 !ftrace_lookup_ip(ops->filter_hash, rec->ip))
2223 return 0;
2225 /* If in notrace hash, we ignore it too */
2226 if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
2227 return 0;
2229 return 1;
2232 static int referenced_filters(struct dyn_ftrace *rec)
2234 struct ftrace_ops *ops;
2235 int cnt = 0;
2237 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2238 if (ops_references_rec(ops, rec))
2239 cnt++;
2242 return cnt;
2245 static int ftrace_update_code(struct module *mod)
2247 struct ftrace_page *pg;
2248 struct dyn_ftrace *p;
2249 cycle_t start, stop;
2250 unsigned long ref = 0;
2251 bool test = false;
2252 int i;
2255 * When adding a module, we need to check if tracers are
2256 * currently enabled and if they are set to trace all functions.
2257 * If they are, we need to enable the module functions as well
2258 * as update the reference counts for those function records.
2260 if (mod) {
2261 struct ftrace_ops *ops;
2263 for (ops = ftrace_ops_list;
2264 ops != &ftrace_list_end; ops = ops->next) {
2265 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2266 if (ops_traces_mod(ops))
2267 ref++;
2268 else
2269 test = true;
2274 start = ftrace_now(raw_smp_processor_id());
2275 ftrace_update_cnt = 0;
2277 for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2279 for (i = 0; i < pg->index; i++) {
2280 int cnt = ref;
2282 /* If something went wrong, bail without enabling anything */
2283 if (unlikely(ftrace_disabled))
2284 return -1;
2286 p = &pg->records[i];
2287 if (test)
2288 cnt += referenced_filters(p);
2289 p->flags = cnt;
2292 * Do the initial record conversion from mcount jump
2293 * to the NOP instructions.
2295 if (!ftrace_code_disable(mod, p))
2296 break;
2298 ftrace_update_cnt++;
2301 * If the tracing is enabled, go ahead and enable the record.
2303 * The reason not to enable the record immediately is the
2304 * inherent check of ftrace_make_nop/ftrace_make_call for
2305 * correct previous instructions. Doing the NOP conversion
2306 * first puts the module into the correct state, thus
2307 * passing the ftrace_make_call check.
2309 if (ftrace_start_up && cnt) {
2310 int failed = __ftrace_replace_code(p, 1);
2311 if (failed)
2312 ftrace_bug(failed, p->ip);
2317 ftrace_new_pgs = NULL;
2319 stop = ftrace_now(raw_smp_processor_id());
2320 ftrace_update_time = stop - start;
2321 ftrace_update_tot_cnt += ftrace_update_cnt;
2323 return 0;
2326 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2328 int order;
2329 int cnt;
2331 if (WARN_ON(!count))
2332 return -EINVAL;
2334 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2337 * We want to fill as much as possible. No more than a page
2338 * may be empty.
2340 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2341 order--;
2343 again:
2344 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2346 if (!pg->records) {
2347 /* if we can't allocate this size, try something smaller */
2348 if (!order)
2349 return -ENOMEM;
2350 order >>= 1;
2351 goto again;
2354 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2355 pg->size = cnt;
2357 if (cnt > count)
2358 cnt = count;
2360 return cnt;
2363 static struct ftrace_page *
2364 ftrace_allocate_pages(unsigned long num_to_init)
2366 struct ftrace_page *start_pg;
2367 struct ftrace_page *pg;
2368 int order;
2369 int cnt;
2371 if (!num_to_init)
2372 return 0;
2374 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2375 if (!pg)
2376 return NULL;
2379 * Try to allocate as much as possible in one contiguous
2380 * location that fills in all of the space. We want to
2381 * waste as little space as possible.
2383 for (;;) {
2384 cnt = ftrace_allocate_records(pg, num_to_init);
2385 if (cnt < 0)
2386 goto free_pages;
2388 num_to_init -= cnt;
2389 if (!num_to_init)
2390 break;
2392 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2393 if (!pg->next)
2394 goto free_pages;
2396 pg = pg->next;
2399 return start_pg;
2401 free_pages:
2402 while (start_pg) {
2403 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2404 free_pages((unsigned long)pg->records, order);
2405 start_pg = pg->next;
2406 kfree(pg);
2407 pg = start_pg;
2409 pr_info("ftrace: FAILED to allocate memory for functions\n");
2410 return NULL;
2413 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2415 int cnt;
2417 if (!num_to_init) {
2418 pr_info("ftrace: No functions to be traced?\n");
2419 return -1;
2422 cnt = num_to_init / ENTRIES_PER_PAGE;
2423 pr_info("ftrace: allocating %ld entries in %d pages\n",
2424 num_to_init, cnt + 1);
2426 return 0;
2429 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2431 struct ftrace_iterator {
2432 loff_t pos;
2433 loff_t func_pos;
2434 struct ftrace_page *pg;
2435 struct dyn_ftrace *func;
2436 struct ftrace_func_probe *probe;
2437 struct trace_parser parser;
2438 struct ftrace_hash *hash;
2439 struct ftrace_ops *ops;
2440 int hidx;
2441 int idx;
2442 unsigned flags;
2445 static void *
2446 t_hash_next(struct seq_file *m, loff_t *pos)
2448 struct ftrace_iterator *iter = m->private;
2449 struct hlist_node *hnd = NULL;
2450 struct hlist_head *hhd;
2452 (*pos)++;
2453 iter->pos = *pos;
2455 if (iter->probe)
2456 hnd = &iter->probe->node;
2457 retry:
2458 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2459 return NULL;
2461 hhd = &ftrace_func_hash[iter->hidx];
2463 if (hlist_empty(hhd)) {
2464 iter->hidx++;
2465 hnd = NULL;
2466 goto retry;
2469 if (!hnd)
2470 hnd = hhd->first;
2471 else {
2472 hnd = hnd->next;
2473 if (!hnd) {
2474 iter->hidx++;
2475 goto retry;
2479 if (WARN_ON_ONCE(!hnd))
2480 return NULL;
2482 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2484 return iter;
2487 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2489 struct ftrace_iterator *iter = m->private;
2490 void *p = NULL;
2491 loff_t l;
2493 if (!(iter->flags & FTRACE_ITER_DO_HASH))
2494 return NULL;
2496 if (iter->func_pos > *pos)
2497 return NULL;
2499 iter->hidx = 0;
2500 for (l = 0; l <= (*pos - iter->func_pos); ) {
2501 p = t_hash_next(m, &l);
2502 if (!p)
2503 break;
2505 if (!p)
2506 return NULL;
2508 /* Only set this if we have an item */
2509 iter->flags |= FTRACE_ITER_HASH;
2511 return iter;
2514 static int
2515 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2517 struct ftrace_func_probe *rec;
2519 rec = iter->probe;
2520 if (WARN_ON_ONCE(!rec))
2521 return -EIO;
2523 if (rec->ops->print)
2524 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2526 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2528 if (rec->data)
2529 seq_printf(m, ":%p", rec->data);
2530 seq_putc(m, '\n');
2532 return 0;
2535 static void *
2536 t_next(struct seq_file *m, void *v, loff_t *pos)
2538 struct ftrace_iterator *iter = m->private;
2539 struct ftrace_ops *ops = iter->ops;
2540 struct dyn_ftrace *rec = NULL;
2542 if (unlikely(ftrace_disabled))
2543 return NULL;
2545 if (iter->flags & FTRACE_ITER_HASH)
2546 return t_hash_next(m, pos);
2548 (*pos)++;
2549 iter->pos = iter->func_pos = *pos;
2551 if (iter->flags & FTRACE_ITER_PRINTALL)
2552 return t_hash_start(m, pos);
2554 retry:
2555 if (iter->idx >= iter->pg->index) {
2556 if (iter->pg->next) {
2557 iter->pg = iter->pg->next;
2558 iter->idx = 0;
2559 goto retry;
2561 } else {
2562 rec = &iter->pg->records[iter->idx++];
2563 if (((iter->flags & FTRACE_ITER_FILTER) &&
2564 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2566 ((iter->flags & FTRACE_ITER_NOTRACE) &&
2567 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2569 ((iter->flags & FTRACE_ITER_ENABLED) &&
2570 !(rec->flags & FTRACE_FL_ENABLED))) {
2572 rec = NULL;
2573 goto retry;
2577 if (!rec)
2578 return t_hash_start(m, pos);
2580 iter->func = rec;
2582 return iter;
2585 static void reset_iter_read(struct ftrace_iterator *iter)
2587 iter->pos = 0;
2588 iter->func_pos = 0;
2589 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2592 static void *t_start(struct seq_file *m, loff_t *pos)
2594 struct ftrace_iterator *iter = m->private;
2595 struct ftrace_ops *ops = iter->ops;
2596 void *p = NULL;
2597 loff_t l;
2599 mutex_lock(&ftrace_lock);
2601 if (unlikely(ftrace_disabled))
2602 return NULL;
2605 * If an lseek was done, then reset and start from beginning.
2607 if (*pos < iter->pos)
2608 reset_iter_read(iter);
2611 * For set_ftrace_filter reading, if we have the filter
2612 * off, we can short cut and just print out that all
2613 * functions are enabled.
2615 if (iter->flags & FTRACE_ITER_FILTER &&
2616 ftrace_hash_empty(ops->filter_hash)) {
2617 if (*pos > 0)
2618 return t_hash_start(m, pos);
2619 iter->flags |= FTRACE_ITER_PRINTALL;
2620 /* reset in case of seek/pread */
2621 iter->flags &= ~FTRACE_ITER_HASH;
2622 return iter;
2625 if (iter->flags & FTRACE_ITER_HASH)
2626 return t_hash_start(m, pos);
2629 * Unfortunately, we need to restart at ftrace_pages_start
2630 * every time we let go of ftrace_lock. This is because
2631 * those pointers can change without the lock.
2633 iter->pg = ftrace_pages_start;
2634 iter->idx = 0;
2635 for (l = 0; l <= *pos; ) {
2636 p = t_next(m, p, &l);
2637 if (!p)
2638 break;
2641 if (!p)
2642 return t_hash_start(m, pos);
2644 return iter;
2647 static void t_stop(struct seq_file *m, void *p)
2649 mutex_unlock(&ftrace_lock);
2652 static int t_show(struct seq_file *m, void *v)
2654 struct ftrace_iterator *iter = m->private;
2655 struct dyn_ftrace *rec;
2657 if (iter->flags & FTRACE_ITER_HASH)
2658 return t_hash_show(m, iter);
2660 if (iter->flags & FTRACE_ITER_PRINTALL) {
2661 seq_printf(m, "#### all functions enabled ####\n");
2662 return 0;
2665 rec = iter->func;
2667 if (!rec)
2668 return 0;
2670 seq_printf(m, "%ps", (void *)rec->ip);
2671 if (iter->flags & FTRACE_ITER_ENABLED)
2672 seq_printf(m, " (%ld)%s",
2673 rec->flags & ~FTRACE_FL_MASK,
2674 rec->flags & FTRACE_FL_REGS ? " R" : "");
2675 seq_printf(m, "\n");
2677 return 0;
2680 static const struct seq_operations show_ftrace_seq_ops = {
2681 .start = t_start,
2682 .next = t_next,
2683 .stop = t_stop,
2684 .show = t_show,
2687 static int
2688 ftrace_avail_open(struct inode *inode, struct file *file)
2690 struct ftrace_iterator *iter;
2692 if (unlikely(ftrace_disabled))
2693 return -ENODEV;
2695 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2696 if (iter) {
2697 iter->pg = ftrace_pages_start;
2698 iter->ops = &global_ops;
2701 return iter ? 0 : -ENOMEM;
2704 static int
2705 ftrace_enabled_open(struct inode *inode, struct file *file)
2707 struct ftrace_iterator *iter;
2709 if (unlikely(ftrace_disabled))
2710 return -ENODEV;
2712 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2713 if (iter) {
2714 iter->pg = ftrace_pages_start;
2715 iter->flags = FTRACE_ITER_ENABLED;
2716 iter->ops = &global_ops;
2719 return iter ? 0 : -ENOMEM;
2722 static void ftrace_filter_reset(struct ftrace_hash *hash)
2724 mutex_lock(&ftrace_lock);
2725 ftrace_hash_clear(hash);
2726 mutex_unlock(&ftrace_lock);
2730 * ftrace_regex_open - initialize function tracer filter files
2731 * @ops: The ftrace_ops that hold the hash filters
2732 * @flag: The type of filter to process
2733 * @inode: The inode, usually passed in to your open routine
2734 * @file: The file, usually passed in to your open routine
2736 * ftrace_regex_open() initializes the filter files for the
2737 * @ops. Depending on @flag it may process the filter hash or
2738 * the notrace hash of @ops. With this called from the open
2739 * routine, you can use ftrace_filter_write() for the write
2740 * routine if @flag has FTRACE_ITER_FILTER set, or
2741 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2742 * ftrace_filter_lseek() should be used as the lseek routine, and
2743 * release must call ftrace_regex_release().
2746 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2747 struct inode *inode, struct file *file)
2749 struct ftrace_iterator *iter;
2750 struct ftrace_hash *hash;
2751 int ret = 0;
2753 ftrace_ops_init(ops);
2755 if (unlikely(ftrace_disabled))
2756 return -ENODEV;
2758 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2759 if (!iter)
2760 return -ENOMEM;
2762 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2763 kfree(iter);
2764 return -ENOMEM;
2767 iter->ops = ops;
2768 iter->flags = flag;
2770 mutex_lock(&ops->regex_lock);
2772 if (flag & FTRACE_ITER_NOTRACE)
2773 hash = ops->notrace_hash;
2774 else
2775 hash = ops->filter_hash;
2777 if (file->f_mode & FMODE_WRITE) {
2778 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2779 if (!iter->hash) {
2780 trace_parser_put(&iter->parser);
2781 kfree(iter);
2782 ret = -ENOMEM;
2783 goto out_unlock;
2787 if ((file->f_mode & FMODE_WRITE) &&
2788 (file->f_flags & O_TRUNC))
2789 ftrace_filter_reset(iter->hash);
2791 if (file->f_mode & FMODE_READ) {
2792 iter->pg = ftrace_pages_start;
2794 ret = seq_open(file, &show_ftrace_seq_ops);
2795 if (!ret) {
2796 struct seq_file *m = file->private_data;
2797 m->private = iter;
2798 } else {
2799 /* Failed */
2800 free_ftrace_hash(iter->hash);
2801 trace_parser_put(&iter->parser);
2802 kfree(iter);
2804 } else
2805 file->private_data = iter;
2807 out_unlock:
2808 mutex_unlock(&ops->regex_lock);
2810 return ret;
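/*
 * A minimal sketch of how another set of ops could wire up its own
 * filter file with the helpers documented above.  The names my_ops,
 * my_filter_open and my_filter_fops are hypothetical.
 *
 *	static int my_filter_open(struct inode *inode, struct file *file)
 *	{
 *		return ftrace_regex_open(&my_ops, FTRACE_ITER_FILTER,
 *					 inode, file);
 *	}
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open		= my_filter_open,
 *		.read		= seq_read,
 *		.write		= ftrace_filter_write,
 *		.llseek		= ftrace_filter_lseek,
 *		.release	= ftrace_regex_release,
 *	};
 *
 * This mirrors how ftrace_filter_fops is built for the global ops below.
 */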
2813 static int
2814 ftrace_filter_open(struct inode *inode, struct file *file)
2816 return ftrace_regex_open(&global_ops,
2817 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2818 inode, file);
2821 static int
2822 ftrace_notrace_open(struct inode *inode, struct file *file)
2824 return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2825 inode, file);
2828 static int ftrace_match(char *str, char *regex, int len, int type)
2830 int matched = 0;
2831 int slen;
2833 switch (type) {
2834 case MATCH_FULL:
2835 if (strcmp(str, regex) == 0)
2836 matched = 1;
2837 break;
2838 case MATCH_FRONT_ONLY:
2839 if (strncmp(str, regex, len) == 0)
2840 matched = 1;
2841 break;
2842 case MATCH_MIDDLE_ONLY:
2843 if (strstr(str, regex))
2844 matched = 1;
2845 break;
2846 case MATCH_END_ONLY:
2847 slen = strlen(str);
2848 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2849 matched = 1;
2850 break;
2853 return matched;
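/*
 * For illustration, assuming filter_parse_regex() maps the usual glob
 * forms onto the MATCH_* types handled above:
 *
 *	"schedule"	-> MATCH_FULL		(exact match)
 *	"sched_*"	-> MATCH_FRONT_ONLY	(prefix match)
 *	"*_lock"	-> MATCH_END_ONLY	(suffix match)
 *	"*sched*"	-> MATCH_MIDDLE_ONLY	(substring match)
 */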
2856 static int
2857 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2859 struct ftrace_func_entry *entry;
2860 int ret = 0;
2862 entry = ftrace_lookup_ip(hash, rec->ip);
2863 if (not) {
2864 /* Do nothing if it doesn't exist */
2865 if (!entry)
2866 return 0;
2868 free_hash_entry(hash, entry);
2869 } else {
2870 /* Do nothing if it exists */
2871 if (entry)
2872 return 0;
2874 ret = add_hash_entry(hash, rec->ip);
2876 return ret;
2879 static int
2880 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2881 char *regex, int len, int type)
2883 char str[KSYM_SYMBOL_LEN];
2884 char *modname;
2886 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2888 if (mod) {
2889 /* module lookup requires matching the module */
2890 if (!modname || strcmp(modname, mod))
2891 return 0;
2893 /* blank search means to match all funcs in the mod */
2894 if (!len)
2895 return 1;
2898 return ftrace_match(str, regex, len, type);
2901 static int
2902 match_records(struct ftrace_hash *hash, char *buff,
2903 int len, char *mod, int not)
2905 unsigned search_len = 0;
2906 struct ftrace_page *pg;
2907 struct dyn_ftrace *rec;
2908 int type = MATCH_FULL;
2909 char *search = buff;
2910 int found = 0;
2911 int ret;
2913 if (len) {
2914 type = filter_parse_regex(buff, len, &search, &not);
2915 search_len = strlen(search);
2918 mutex_lock(&ftrace_lock);
2920 if (unlikely(ftrace_disabled))
2921 goto out_unlock;
2923 do_for_each_ftrace_rec(pg, rec) {
2924 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2925 ret = enter_record(hash, rec, not);
2926 if (ret < 0) {
2927 found = ret;
2928 goto out_unlock;
2930 found = 1;
2932 } while_for_each_ftrace_rec();
2933 out_unlock:
2934 mutex_unlock(&ftrace_lock);
2936 return found;
2939 static int
2940 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2942 return match_records(hash, buff, len, NULL, 0);
2945 static int
2946 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2948 int not = 0;
2950 /* blank or '*' mean the same */
2951 if (strcmp(buff, "*") == 0)
2952 buff[0] = 0;
2954 /* handle the case of 'don't filter this module' */
2955 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2956 buff[0] = 0;
2957 not = 1;
2960 return match_records(hash, buff, strlen(buff), mod, not);
2964 * We register the module command as a template to show others how
2965 * to register a command as well.
2968 static int
2969 ftrace_mod_callback(struct ftrace_hash *hash,
2970 char *func, char *cmd, char *param, int enable)
2972 char *mod;
2973 int ret = -EINVAL;
2976 * cmd == 'mod' because we only registered this func
2977 * for the 'mod' ftrace_func_command.
2978 * But if you register one func with multiple commands,
2979 * you can tell which command was used by the cmd
2980 * parameter.
2983 /* we must have a module name */
2984 if (!param)
2985 return ret;
2987 mod = strsep(&param, ":");
2988 if (!strlen(mod))
2989 return ret;
2991 ret = ftrace_match_module_records(hash, func, mod);
2992 if (!ret)
2993 ret = -EINVAL;
2994 if (ret < 0)
2995 return ret;
2997 return 0;
3000 static struct ftrace_func_command ftrace_mod_cmd = {
3001 .name = "mod",
3002 .func = ftrace_mod_callback,
3005 static int __init ftrace_mod_cmd_init(void)
3007 return register_ftrace_command(&ftrace_mod_cmd);
3009 core_initcall(ftrace_mod_cmd_init);
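/*
 * A minimal sketch of registering another command, modeled on the "mod"
 * template above.  The name "foo" and its callback are hypothetical; the
 * callback signature is the same one ftrace_mod_callback() uses, and the
 * command would be invoked by writing "<func>:foo:<param>" to
 * set_ftrace_filter.
 *
 *	static int ftrace_foo_callback(struct ftrace_hash *hash,
 *				       char *func, char *cmd,
 *				       char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command ftrace_foo_cmd = {
 *		.name	= "foo",
 *		.func	= ftrace_foo_callback,
 *	};
 *
 *	static int __init ftrace_foo_cmd_init(void)
 *	{
 *		return register_ftrace_command(&ftrace_foo_cmd);
 *	}
 *	core_initcall(ftrace_foo_cmd_init);
 */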
3011 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3012 struct ftrace_ops *op, struct pt_regs *pt_regs)
3014 struct ftrace_func_probe *entry;
3015 struct hlist_head *hhd;
3016 unsigned long key;
3018 key = hash_long(ip, FTRACE_HASH_BITS);
3020 hhd = &ftrace_func_hash[key];
3022 if (hlist_empty(hhd))
3023 return;
3026 * Disable preemption for these calls to prevent an RCU grace
3027 * period. This syncs the hash iteration and freeing of items
3028 * on the hash. rcu_read_lock is too dangerous here.
3030 preempt_disable_notrace();
3031 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3032 if (entry->ip == ip)
3033 entry->ops->func(ip, parent_ip, &entry->data);
3035 preempt_enable_notrace();
3038 static struct ftrace_ops trace_probe_ops __read_mostly =
3040 .func = function_trace_probe_call,
3041 .flags = FTRACE_OPS_FL_INITIALIZED,
3042 INIT_REGEX_LOCK(trace_probe_ops)
3045 static int ftrace_probe_registered;
3047 static void __enable_ftrace_function_probe(void)
3049 int ret;
3050 int i;
3052 if (ftrace_probe_registered) {
3053 /* still need to update the function call sites */
3054 if (ftrace_enabled)
3055 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3056 return;
3059 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3060 struct hlist_head *hhd = &ftrace_func_hash[i];
3061 if (hhd->first)
3062 break;
3064 /* Nothing registered? */
3065 if (i == FTRACE_FUNC_HASHSIZE)
3066 return;
3068 ret = ftrace_startup(&trace_probe_ops, 0);
3070 ftrace_probe_registered = 1;
3073 static void __disable_ftrace_function_probe(void)
3075 int i;
3077 if (!ftrace_probe_registered)
3078 return;
3080 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3081 struct hlist_head *hhd = &ftrace_func_hash[i];
3082 if (hhd->first)
3083 return;
3086 /* no more funcs left */
3087 ftrace_shutdown(&trace_probe_ops, 0);
3089 ftrace_probe_registered = 0;
3093 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3095 if (entry->ops->free)
3096 entry->ops->free(entry->ops, entry->ip, &entry->data);
3097 kfree(entry);
3101 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3102 void *data)
3104 struct ftrace_func_probe *entry;
3105 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3106 struct ftrace_hash *hash;
3107 struct ftrace_page *pg;
3108 struct dyn_ftrace *rec;
3109 int type, len, not;
3110 unsigned long key;
3111 int count = 0;
3112 char *search;
3113 int ret;
3115 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3116 len = strlen(search);
3118 /* we do not support '!' for function probes */
3119 if (WARN_ON(not))
3120 return -EINVAL;
3122 mutex_lock(&trace_probe_ops.regex_lock);
3124 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3125 if (!hash) {
3126 count = -ENOMEM;
3127 goto out;
3130 if (unlikely(ftrace_disabled)) {
3131 count = -ENODEV;
3132 goto out;
3135 mutex_lock(&ftrace_lock);
3137 do_for_each_ftrace_rec(pg, rec) {
3139 if (!ftrace_match_record(rec, NULL, search, len, type))
3140 continue;
3142 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3143 if (!entry) {
3144 /* If we did not process any, then return error */
3145 if (!count)
3146 count = -ENOMEM;
3147 goto out_unlock;
3150 count++;
3152 entry->data = data;
3155 * The caller might want to do something special
3156 * for each function we find. We call the callback
3157 * to give the caller an opportunity to do so.
3159 if (ops->init) {
3160 if (ops->init(ops, rec->ip, &entry->data) < 0) {
3161 /* caller does not like this func */
3162 kfree(entry);
3163 continue;
3167 ret = enter_record(hash, rec, 0);
3168 if (ret < 0) {
3169 kfree(entry);
3170 count = ret;
3171 goto out_unlock;
3174 entry->ops = ops;
3175 entry->ip = rec->ip;
3177 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3178 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3180 } while_for_each_ftrace_rec();
3182 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3183 if (ret < 0)
3184 count = ret;
3186 __enable_ftrace_function_probe();
3188 out_unlock:
3189 mutex_unlock(&ftrace_lock);
3190 out:
3191 mutex_unlock(&trace_probe_ops.regex_lock);
3192 free_ftrace_hash(hash);
3194 return count;
3197 enum {
3198 PROBE_TEST_FUNC = 1,
3199 PROBE_TEST_DATA = 2
3202 static void
3203 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3204 void *data, int flags)
3206 struct ftrace_func_entry *rec_entry;
3207 struct ftrace_func_probe *entry;
3208 struct ftrace_func_probe *p;
3209 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3210 struct list_head free_list;
3211 struct ftrace_hash *hash;
3212 struct hlist_node *tmp;
3213 char str[KSYM_SYMBOL_LEN];
3214 int type = MATCH_FULL;
3215 int i, len = 0;
3216 char *search;
3218 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3219 glob = NULL;
3220 else if (glob) {
3221 int not;
3223 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3224 len = strlen(search);
3226 /* we do not support '!' for function probes */
3227 if (WARN_ON(not))
3228 return;
3231 mutex_lock(&trace_probe_ops.regex_lock);
3233 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3234 if (!hash)
3235 /* Hmm, should report this somehow */
3236 goto out_unlock;
3238 INIT_LIST_HEAD(&free_list);
3240 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3241 struct hlist_head *hhd = &ftrace_func_hash[i];
3243 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3245 /* break up if statements for readability */
3246 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3247 continue;
3249 if ((flags & PROBE_TEST_DATA) && entry->data != data)
3250 continue;
3252 /* do this last, since it is the most expensive */
3253 if (glob) {
3254 kallsyms_lookup(entry->ip, NULL, NULL,
3255 NULL, str);
3256 if (!ftrace_match(str, glob, len, type))
3257 continue;
3260 rec_entry = ftrace_lookup_ip(hash, entry->ip);
3261 /* It is possible more than one entry had this ip */
3262 if (rec_entry)
3263 free_hash_entry(hash, rec_entry);
3265 hlist_del_rcu(&entry->node);
3266 list_add(&entry->free_list, &free_list);
3269 mutex_lock(&ftrace_lock);
3270 __disable_ftrace_function_probe();
3272 * Remove after the disable is called. Otherwise, if the last
3273 * probe is removed, a null hash means *all enabled*.
3275 ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3276 synchronize_sched();
3277 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3278 list_del(&entry->free_list);
3279 ftrace_free_entry(entry);
3281 mutex_unlock(&ftrace_lock);
3283 out_unlock:
3284 mutex_unlock(&trace_probe_ops.regex_lock);
3285 free_ftrace_hash(hash);
3288 void
3289 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3290 void *data)
3292 __unregister_ftrace_function_probe(glob, ops, data,
3293 PROBE_TEST_FUNC | PROBE_TEST_DATA);
3296 void
3297 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3299 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3302 void unregister_ftrace_function_probe_all(char *glob)
3304 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
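/*
 * A minimal usage sketch for the probe API above.  my_probe_func and
 * my_probe_ops are hypothetical; the callback signature follows the way
 * function_trace_probe_call() invokes entry->ops->func().
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		(runs every time a matched function is hit)
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	(attach the probe to every function starting with "schedule")
 *	register_ftrace_function_probe("schedule*", &my_probe_ops, NULL);
 *
 *	(and detach it again later)
 *	unregister_ftrace_function_probe_func("schedule*", &my_probe_ops);
 */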
3307 static LIST_HEAD(ftrace_commands);
3308 static DEFINE_MUTEX(ftrace_cmd_mutex);
3311 * Currently we only register ftrace commands from __init, so mark this
3312 * __init too.
3314 __init int register_ftrace_command(struct ftrace_func_command *cmd)
3316 struct ftrace_func_command *p;
3317 int ret = 0;
3319 mutex_lock(&ftrace_cmd_mutex);
3320 list_for_each_entry(p, &ftrace_commands, list) {
3321 if (strcmp(cmd->name, p->name) == 0) {
3322 ret = -EBUSY;
3323 goto out_unlock;
3326 list_add(&cmd->list, &ftrace_commands);
3327 out_unlock:
3328 mutex_unlock(&ftrace_cmd_mutex);
3330 return ret;
3334 * Currently we only unregister ftrace commands from __init, so mark
3335 * this __init too.
3337 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3339 struct ftrace_func_command *p, *n;
3340 int ret = -ENODEV;
3342 mutex_lock(&ftrace_cmd_mutex);
3343 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3344 if (strcmp(cmd->name, p->name) == 0) {
3345 ret = 0;
3346 list_del_init(&p->list);
3347 goto out_unlock;
3350 out_unlock:
3351 mutex_unlock(&ftrace_cmd_mutex);
3353 return ret;
3356 static int ftrace_process_regex(struct ftrace_hash *hash,
3357 char *buff, int len, int enable)
3359 char *func, *command, *next = buff;
3360 struct ftrace_func_command *p;
3361 int ret = -EINVAL;
3363 func = strsep(&next, ":");
3365 if (!next) {
3366 ret = ftrace_match_records(hash, func, len);
3367 if (!ret)
3368 ret = -EINVAL;
3369 if (ret < 0)
3370 return ret;
3371 return 0;
3374 /* command found */
3376 command = strsep(&next, ":");
3378 mutex_lock(&ftrace_cmd_mutex);
3379 list_for_each_entry(p, &ftrace_commands, list) {
3380 if (strcmp(p->name, command) == 0) {
3381 ret = p->func(hash, func, command, next, enable);
3382 goto out_unlock;
3385 out_unlock:
3386 mutex_unlock(&ftrace_cmd_mutex);
3388 return ret;
3391 static ssize_t
3392 ftrace_regex_write(struct file *file, const char __user *ubuf,
3393 size_t cnt, loff_t *ppos, int enable)
3395 struct ftrace_iterator *iter;
3396 struct trace_parser *parser;
3397 ssize_t ret, read;
3399 if (!cnt)
3400 return 0;
3402 if (file->f_mode & FMODE_READ) {
3403 struct seq_file *m = file->private_data;
3404 iter = m->private;
3405 } else
3406 iter = file->private_data;
3408 if (unlikely(ftrace_disabled))
3409 return -ENODEV;
3411 /* iter->hash is a local copy, so we don't need regex_lock */
3413 parser = &iter->parser;
3414 read = trace_get_user(parser, ubuf, cnt, ppos);
3416 if (read >= 0 && trace_parser_loaded(parser) &&
3417 !trace_parser_cont(parser)) {
3418 ret = ftrace_process_regex(iter->hash, parser->buffer,
3419 parser->idx, enable);
3420 trace_parser_clear(parser);
3421 if (ret < 0)
3422 goto out;
3425 ret = read;
3426 out:
3427 return ret;
3430 ssize_t
3431 ftrace_filter_write(struct file *file, const char __user *ubuf,
3432 size_t cnt, loff_t *ppos)
3434 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3437 ssize_t
3438 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3439 size_t cnt, loff_t *ppos)
3441 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3444 static int
3445 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3447 struct ftrace_func_entry *entry;
3449 if (!ftrace_location(ip))
3450 return -EINVAL;
3452 if (remove) {
3453 entry = ftrace_lookup_ip(hash, ip);
3454 if (!entry)
3455 return -ENOENT;
3456 free_hash_entry(hash, entry);
3457 return 0;
3460 return add_hash_entry(hash, ip);
3463 static void ftrace_ops_update_code(struct ftrace_ops *ops)
3465 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3466 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3469 static int
3470 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3471 unsigned long ip, int remove, int reset, int enable)
3473 struct ftrace_hash **orig_hash;
3474 struct ftrace_hash *hash;
3475 int ret;
3477 /* All global ops use the global ops filters */
3478 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3479 ops = &global_ops;
3481 if (unlikely(ftrace_disabled))
3482 return -ENODEV;
3484 mutex_lock(&ops->regex_lock);
3486 if (enable)
3487 orig_hash = &ops->filter_hash;
3488 else
3489 orig_hash = &ops->notrace_hash;
3491 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3492 if (!hash) {
3493 ret = -ENOMEM;
3494 goto out_regex_unlock;
3497 if (reset)
3498 ftrace_filter_reset(hash);
3499 if (buf && !ftrace_match_records(hash, buf, len)) {
3500 ret = -EINVAL;
3501 goto out_regex_unlock;
3503 if (ip) {
3504 ret = ftrace_match_addr(hash, ip, remove);
3505 if (ret < 0)
3506 goto out_regex_unlock;
3509 mutex_lock(&ftrace_lock);
3510 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3511 if (!ret)
3512 ftrace_ops_update_code(ops);
3514 mutex_unlock(&ftrace_lock);
3516 out_regex_unlock:
3517 mutex_unlock(&ops->regex_lock);
3519 free_ftrace_hash(hash);
3520 return ret;
3523 static int
3524 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3525 int reset, int enable)
3527 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3531 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3532 * @ops - the ops to set the filter with
3533 * @ip - the address to add to or remove from the filter.
3534 * @remove - non zero to remove the ip from the filter
3535 * @reset - non zero to reset all filters before applying this filter.
3537 * Filters denote which functions should be enabled when tracing is enabled.
3538 * If @ip is NULL, it fails to update the filter.
3540 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3541 int remove, int reset)
3543 ftrace_ops_init(ops);
3544 return ftrace_set_addr(ops, ip, remove, reset, 1);
3546 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
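/*
 * A minimal sketch, assuming kallsyms_lookup_name() is available to turn
 * a symbol name into an address.  my_ops is hypothetical.
 *
 *	unsigned long ip = kallsyms_lookup_name("schedule");
 *
 *	(reset the filter and trace only this one address)
 *	ftrace_set_filter_ip(&my_ops, ip, 0, 1);
 *
 *	(later, drop the address from the filter again)
 *	ftrace_set_filter_ip(&my_ops, ip, 1, 0);
 */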
3548 static int
3549 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3550 int reset, int enable)
3552 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3556 * ftrace_set_filter - set a function to filter on in ftrace
3557 * @ops - the ops to set the filter with
3558 * @buf - the string that holds the function filter text.
3559 * @len - the length of the string.
3560 * @reset - non zero to reset all filters before applying this filter.
3562 * Filters denote which functions should be enabled when tracing is enabled.
3563 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3565 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3566 int len, int reset)
3568 ftrace_ops_init(ops);
3569 return ftrace_set_regex(ops, buf, len, reset, 1);
3571 EXPORT_SYMBOL_GPL(ftrace_set_filter);
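/*
 * A minimal sketch of setting a glob filter on an ops before it is
 * registered.  my_ops is hypothetical.
 *
 *	(reset any old filter and trace only functions starting with "vfs_")
 *	ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
 *
 *	(and additionally keep vfs_read out of the trace)
 *	ftrace_set_notrace(&my_ops, "vfs_read", strlen("vfs_read"), 0);
 */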
3574 * ftrace_set_notrace - set a function to not trace in ftrace
3575 * @ops - the ops to set the notrace filter with
3576 * @buf - the string that holds the function notrace text.
3577 * @len - the length of the string.
3578 * @reset - non zero to reset all filters before applying this filter.
3580 * Notrace Filters denote which functions should not be enabled when tracing
3581 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3582 * for tracing.
3584 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3585 int len, int reset)
3587 ftrace_ops_init(ops);
3588 return ftrace_set_regex(ops, buf, len, reset, 0);
3590 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3592 * ftrace_set_global_filter - set a function to filter on with the global tracers
3594 * @buf - the string that holds the function filter text.
3595 * @len - the length of the string.
3596 * @reset - non zero to reset all filters before applying this filter.
3598 * Filters denote which functions should be enabled when tracing is enabled.
3599 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3601 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3603 ftrace_set_regex(&global_ops, buf, len, reset, 1);
3605 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3608 * ftrace_set_global_notrace - set a function to not trace with the global tracers
3610 * @buf - the string that holds the function notrace text.
3611 * @len - the length of the string.
3612 * @reset - non zero to reset all filters before applying this filter.
3614 * Notrace Filters denote which functions should not be enabled when tracing
3615 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3616 * for tracing.
3618 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3620 ftrace_set_regex(&global_ops, buf, len, reset, 0);
3622 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3625 * command line interface to allow users to set filters on boot up.
3627 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3628 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3629 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3631 /* Used by function selftest to not test if filter is set */
3632 bool ftrace_filter_param __initdata;
3634 static int __init set_ftrace_notrace(char *str)
3636 ftrace_filter_param = true;
3637 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3638 return 1;
3640 __setup("ftrace_notrace=", set_ftrace_notrace);
3642 static int __init set_ftrace_filter(char *str)
3644 ftrace_filter_param = true;
3645 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3646 return 1;
3648 __setup("ftrace_filter=", set_ftrace_filter);
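/*
 * Example kernel command line usage of these boot parameters (the
 * function names are only illustrative); several functions may be
 * given, separated by commas:
 *
 *	ftrace_filter=sys_read,sys_write
 *	ftrace_notrace=*spin_lock*
 */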
3650 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3651 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3652 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
3654 static int __init set_graph_function(char *str)
3656 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3657 return 1;
3659 __setup("ftrace_graph_filter=", set_graph_function);
3661 static void __init set_ftrace_early_graph(char *buf)
3663 int ret;
3664 char *func;
3666 while (buf) {
3667 func = strsep(&buf, ",");
3668 /* we allow only one expression at a time */
3669 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3670 FTRACE_GRAPH_MAX_FUNCS, func);
3671 if (ret)
3672 printk(KERN_DEBUG "ftrace: function %s not "
3673 "traceable\n", func);
3676 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3678 void __init
3679 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3681 char *func;
3683 ftrace_ops_init(ops);
3685 while (buf) {
3686 func = strsep(&buf, ",");
3687 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3691 static void __init set_ftrace_early_filters(void)
3693 if (ftrace_filter_buf[0])
3694 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3695 if (ftrace_notrace_buf[0])
3696 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3697 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3698 if (ftrace_graph_buf[0])
3699 set_ftrace_early_graph(ftrace_graph_buf);
3700 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3703 int ftrace_regex_release(struct inode *inode, struct file *file)
3705 struct seq_file *m = (struct seq_file *)file->private_data;
3706 struct ftrace_iterator *iter;
3707 struct ftrace_hash **orig_hash;
3708 struct trace_parser *parser;
3709 int filter_hash;
3710 int ret;
3712 if (file->f_mode & FMODE_READ) {
3713 iter = m->private;
3714 seq_release(inode, file);
3715 } else
3716 iter = file->private_data;
3718 parser = &iter->parser;
3719 if (trace_parser_loaded(parser)) {
3720 parser->buffer[parser->idx] = 0;
3721 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3724 trace_parser_put(parser);
3726 mutex_lock(&iter->ops->regex_lock);
3728 if (file->f_mode & FMODE_WRITE) {
3729 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3731 if (filter_hash)
3732 orig_hash = &iter->ops->filter_hash;
3733 else
3734 orig_hash = &iter->ops->notrace_hash;
3736 mutex_lock(&ftrace_lock);
3737 ret = ftrace_hash_move(iter->ops, filter_hash,
3738 orig_hash, iter->hash);
3739 if (!ret)
3740 ftrace_ops_update_code(iter->ops);
3742 mutex_unlock(&ftrace_lock);
3745 mutex_unlock(&iter->ops->regex_lock);
3746 free_ftrace_hash(iter->hash);
3747 kfree(iter);
3749 return 0;
3752 static const struct file_operations ftrace_avail_fops = {
3753 .open = ftrace_avail_open,
3754 .read = seq_read,
3755 .llseek = seq_lseek,
3756 .release = seq_release_private,
3759 static const struct file_operations ftrace_enabled_fops = {
3760 .open = ftrace_enabled_open,
3761 .read = seq_read,
3762 .llseek = seq_lseek,
3763 .release = seq_release_private,
3766 static const struct file_operations ftrace_filter_fops = {
3767 .open = ftrace_filter_open,
3768 .read = seq_read,
3769 .write = ftrace_filter_write,
3770 .llseek = ftrace_filter_lseek,
3771 .release = ftrace_regex_release,
3774 static const struct file_operations ftrace_notrace_fops = {
3775 .open = ftrace_notrace_open,
3776 .read = seq_read,
3777 .write = ftrace_notrace_write,
3778 .llseek = ftrace_filter_lseek,
3779 .release = ftrace_regex_release,
3782 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3784 static DEFINE_MUTEX(graph_lock);
3786 int ftrace_graph_count;
3787 int ftrace_graph_notrace_count;
3788 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3789 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3791 struct ftrace_graph_data {
3792 unsigned long *table;
3793 size_t size;
3794 int *count;
3795 const struct seq_operations *seq_ops;
3798 static void *
3799 __g_next(struct seq_file *m, loff_t *pos)
3801 struct ftrace_graph_data *fgd = m->private;
3803 if (*pos >= *fgd->count)
3804 return NULL;
3805 return &fgd->table[*pos];
3808 static void *
3809 g_next(struct seq_file *m, void *v, loff_t *pos)
3811 (*pos)++;
3812 return __g_next(m, pos);
3815 static void *g_start(struct seq_file *m, loff_t *pos)
3817 struct ftrace_graph_data *fgd = m->private;
3819 mutex_lock(&graph_lock);
3821 /* Nothing set, tell g_show to print that all functions are enabled */
3822 if (!*fgd->count && !*pos)
3823 return (void *)1;
3825 return __g_next(m, pos);
3828 static void g_stop(struct seq_file *m, void *p)
3830 mutex_unlock(&graph_lock);
3833 static int g_show(struct seq_file *m, void *v)
3835 unsigned long *ptr = v;
3837 if (!ptr)
3838 return 0;
3840 if (ptr == (unsigned long *)1) {
3841 seq_printf(m, "#### all functions enabled ####\n");
3842 return 0;
3845 seq_printf(m, "%ps\n", (void *)*ptr);
3847 return 0;
3850 static const struct seq_operations ftrace_graph_seq_ops = {
3851 .start = g_start,
3852 .next = g_next,
3853 .stop = g_stop,
3854 .show = g_show,
3857 static int
3858 __ftrace_graph_open(struct inode *inode, struct file *file,
3859 struct ftrace_graph_data *fgd)
3861 int ret = 0;
3863 mutex_lock(&graph_lock);
3864 if ((file->f_mode & FMODE_WRITE) &&
3865 (file->f_flags & O_TRUNC)) {
3866 *fgd->count = 0;
3867 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
3869 mutex_unlock(&graph_lock);
3871 if (file->f_mode & FMODE_READ) {
3872 ret = seq_open(file, fgd->seq_ops);
3873 if (!ret) {
3874 struct seq_file *m = file->private_data;
3875 m->private = fgd;
3877 } else
3878 file->private_data = fgd;
3880 return ret;
3883 static int
3884 ftrace_graph_open(struct inode *inode, struct file *file)
3886 struct ftrace_graph_data *fgd;
3888 if (unlikely(ftrace_disabled))
3889 return -ENODEV;
3891 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
3892 if (fgd == NULL)
3893 return -ENOMEM;
3895 fgd->table = ftrace_graph_funcs;
3896 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
3897 fgd->count = &ftrace_graph_count;
3898 fgd->seq_ops = &ftrace_graph_seq_ops;
3900 return __ftrace_graph_open(inode, file, fgd);
3903 static int
3904 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
3906 struct ftrace_graph_data *fgd;
3908 if (unlikely(ftrace_disabled))
3909 return -ENODEV;
3911 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
3912 if (fgd == NULL)
3913 return -ENOMEM;
3915 fgd->table = ftrace_graph_notrace_funcs;
3916 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
3917 fgd->count = &ftrace_graph_notrace_count;
3918 fgd->seq_ops = &ftrace_graph_seq_ops;
3920 return __ftrace_graph_open(inode, file, fgd);
3923 static int
3924 ftrace_graph_release(struct inode *inode, struct file *file)
3926 if (file->f_mode & FMODE_READ) {
3927 struct seq_file *m = file->private_data;
3929 kfree(m->private);
3930 seq_release(inode, file);
3931 } else {
3932 kfree(file->private_data);
3935 return 0;
3938 static int
3939 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
3941 struct dyn_ftrace *rec;
3942 struct ftrace_page *pg;
3943 int search_len;
3944 int fail = 1;
3945 int type, not;
3946 char *search;
3947 bool exists;
3948 int i;
3950 /* decode regex */
3951 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3952 if (!not && *idx >= size)
3953 return -EBUSY;
3955 search_len = strlen(search);
3957 mutex_lock(&ftrace_lock);
3959 if (unlikely(ftrace_disabled)) {
3960 mutex_unlock(&ftrace_lock);
3961 return -ENODEV;
3964 do_for_each_ftrace_rec(pg, rec) {
3966 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3967 /* if it is in the array */
3968 exists = false;
3969 for (i = 0; i < *idx; i++) {
3970 if (array[i] == rec->ip) {
3971 exists = true;
3972 break;
3976 if (!not) {
3977 fail = 0;
3978 if (!exists) {
3979 array[(*idx)++] = rec->ip;
3980 if (*idx >= size)
3981 goto out;
3983 } else {
3984 if (exists) {
3985 array[i] = array[--(*idx)];
3986 array[*idx] = 0;
3987 fail = 0;
3991 } while_for_each_ftrace_rec();
3992 out:
3993 mutex_unlock(&ftrace_lock);
3995 if (fail)
3996 return -EINVAL;
3998 return 0;
4001 static ssize_t
4002 ftrace_graph_write(struct file *file, const char __user *ubuf,
4003 size_t cnt, loff_t *ppos)
4005 struct trace_parser parser;
4006 ssize_t read, ret = 0;
4007 struct ftrace_graph_data *fgd = file->private_data;
4009 if (!cnt)
4010 return 0;
4012 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4013 return -ENOMEM;
4015 read = trace_get_user(&parser, ubuf, cnt, ppos);
4017 if (read >= 0 && trace_parser_loaded((&parser))) {
4018 parser.buffer[parser.idx] = 0;
4020 mutex_lock(&graph_lock);
4022 /* we allow only one expression at a time */
4023 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4024 parser.buffer);
4026 mutex_unlock(&graph_lock);
4029 if (!ret)
4030 ret = read;
4032 trace_parser_put(&parser);
4034 return ret;
4037 static const struct file_operations ftrace_graph_fops = {
4038 .open = ftrace_graph_open,
4039 .read = seq_read,
4040 .write = ftrace_graph_write,
4041 .llseek = ftrace_filter_lseek,
4042 .release = ftrace_graph_release,
4045 static const struct file_operations ftrace_graph_notrace_fops = {
4046 .open = ftrace_graph_notrace_open,
4047 .read = seq_read,
4048 .write = ftrace_graph_write,
4049 .llseek = ftrace_filter_lseek,
4050 .release = ftrace_graph_release,
4052 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4054 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
4057 trace_create_file("available_filter_functions", 0444,
4058 d_tracer, NULL, &ftrace_avail_fops);
4060 trace_create_file("enabled_functions", 0444,
4061 d_tracer, NULL, &ftrace_enabled_fops);
4063 trace_create_file("set_ftrace_filter", 0644, d_tracer,
4064 NULL, &ftrace_filter_fops);
4066 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
4067 NULL, &ftrace_notrace_fops);
4069 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4070 trace_create_file("set_graph_function", 0444, d_tracer,
4071 NULL,
4072 &ftrace_graph_fops);
4073 trace_create_file("set_graph_notrace", 0444, d_tracer,
4074 NULL,
4075 &ftrace_graph_notrace_fops);
4076 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4078 return 0;
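/*
 * The files created above live in the tracing directory of debugfs
 * (typically /sys/kernel/debug/tracing).  A rough shell sketch, with
 * illustrative function and module names:
 *
 *	cat available_filter_functions
 *	echo 'sys_nanosleep' > set_ftrace_filter
 *	echo 'write*:mod:ext4' >> set_ftrace_filter
 *	cat enabled_functions
 */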
4081 static int ftrace_cmp_ips(const void *a, const void *b)
4083 const unsigned long *ipa = a;
4084 const unsigned long *ipb = b;
4086 if (*ipa > *ipb)
4087 return 1;
4088 if (*ipa < *ipb)
4089 return -1;
4090 return 0;
4093 static void ftrace_swap_ips(void *a, void *b, int size)
4095 unsigned long *ipa = a;
4096 unsigned long *ipb = b;
4097 unsigned long t;
4099 t = *ipa;
4100 *ipa = *ipb;
4101 *ipb = t;
4104 static int ftrace_process_locs(struct module *mod,
4105 unsigned long *start,
4106 unsigned long *end)
4108 struct ftrace_page *start_pg;
4109 struct ftrace_page *pg;
4110 struct dyn_ftrace *rec;
4111 unsigned long count;
4112 unsigned long *p;
4113 unsigned long addr;
4114 unsigned long flags = 0; /* Shut up gcc */
4115 int ret = -ENOMEM;
4117 count = end - start;
4119 if (!count)
4120 return 0;
4122 sort(start, count, sizeof(*start),
4123 ftrace_cmp_ips, ftrace_swap_ips);
4125 start_pg = ftrace_allocate_pages(count);
4126 if (!start_pg)
4127 return -ENOMEM;
4129 mutex_lock(&ftrace_lock);
4132 * The core kernel and each module need their own pages, as
4133 * modules will free them when they are removed.
4134 * Force a new page to be allocated for modules.
4136 if (!mod) {
4137 WARN_ON(ftrace_pages || ftrace_pages_start);
4138 /* First initialization */
4139 ftrace_pages = ftrace_pages_start = start_pg;
4140 } else {
4141 if (!ftrace_pages)
4142 goto out;
4144 if (WARN_ON(ftrace_pages->next)) {
4145 /* Hmm, we have free pages? */
4146 while (ftrace_pages->next)
4147 ftrace_pages = ftrace_pages->next;
4150 ftrace_pages->next = start_pg;
4153 p = start;
4154 pg = start_pg;
4155 while (p < end) {
4156 addr = ftrace_call_adjust(*p++);
4158 * Some architecture linkers will pad between
4159 * the different mcount_loc sections of different
4160 * object files to satisfy alignments.
4161 * Skip any NULL pointers.
4163 if (!addr)
4164 continue;
4166 if (pg->index == pg->size) {
4167 /* We should have allocated enough */
4168 if (WARN_ON(!pg->next))
4169 break;
4170 pg = pg->next;
4173 rec = &pg->records[pg->index++];
4174 rec->ip = addr;
4177 /* We should have used all pages */
4178 WARN_ON(pg->next);
4180 /* Assign the last page to ftrace_pages */
4181 ftrace_pages = pg;
4183 /* These new locations need to be initialized */
4184 ftrace_new_pgs = start_pg;
4187 * We only need to disable interrupts on start up
4188 * because we are modifying code that an interrupt
4189 * may execute, and the modification is not atomic.
4190 * But for modules, nothing runs the code we modify
4191 * until we are finished with it, and there's no
4192 * reason to cause large interrupt latencies while we do it.
4194 if (!mod)
4195 local_irq_save(flags);
4196 ftrace_update_code(mod);
4197 if (!mod)
4198 local_irq_restore(flags);
4199 ret = 0;
4200 out:
4201 mutex_unlock(&ftrace_lock);
4203 return ret;
4206 #ifdef CONFIG_MODULES
4208 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4210 void ftrace_release_mod(struct module *mod)
4212 struct dyn_ftrace *rec;
4213 struct ftrace_page **last_pg;
4214 struct ftrace_page *pg;
4215 int order;
4217 mutex_lock(&ftrace_lock);
4219 if (ftrace_disabled)
4220 goto out_unlock;
4223 * Each module has its own ftrace_pages, remove
4224 * them from the list.
4226 last_pg = &ftrace_pages_start;
4227 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4228 rec = &pg->records[0];
4229 if (within_module_core(rec->ip, mod)) {
4231 * As core pages are first, the first
4232 * page should never be a module page.
4234 if (WARN_ON(pg == ftrace_pages_start))
4235 goto out_unlock;
4237 /* Check if we are deleting the last page */
4238 if (pg == ftrace_pages)
4239 ftrace_pages = next_to_ftrace_page(last_pg);
4241 *last_pg = pg->next;
4242 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4243 free_pages((unsigned long)pg->records, order);
4244 kfree(pg);
4245 } else
4246 last_pg = &pg->next;
4248 out_unlock:
4249 mutex_unlock(&ftrace_lock);
4252 static void ftrace_init_module(struct module *mod,
4253 unsigned long *start, unsigned long *end)
4255 if (ftrace_disabled || start == end)
4256 return;
4257 ftrace_process_locs(mod, start, end);
4260 static int ftrace_module_notify_enter(struct notifier_block *self,
4261 unsigned long val, void *data)
4263 struct module *mod = data;
4265 if (val == MODULE_STATE_COMING)
4266 ftrace_init_module(mod, mod->ftrace_callsites,
4267 mod->ftrace_callsites +
4268 mod->num_ftrace_callsites);
4269 return 0;
4272 static int ftrace_module_notify_exit(struct notifier_block *self,
4273 unsigned long val, void *data)
4275 struct module *mod = data;
4277 if (val == MODULE_STATE_GOING)
4278 ftrace_release_mod(mod);
4280 return 0;
4282 #else
4283 static int ftrace_module_notify_enter(struct notifier_block *self,
4284 unsigned long val, void *data)
4286 return 0;
4288 static int ftrace_module_notify_exit(struct notifier_block *self,
4289 unsigned long val, void *data)
4291 return 0;
4293 #endif /* CONFIG_MODULES */
4295 struct notifier_block ftrace_module_enter_nb = {
4296 .notifier_call = ftrace_module_notify_enter,
4297 .priority = INT_MAX, /* Run before anything that can use kprobes */
4300 struct notifier_block ftrace_module_exit_nb = {
4301 .notifier_call = ftrace_module_notify_exit,
4302 .priority = INT_MIN, /* Run after anything that can remove kprobes */
4305 extern unsigned long __start_mcount_loc[];
4306 extern unsigned long __stop_mcount_loc[];
4308 void __init ftrace_init(void)
4310 unsigned long count, addr, flags;
4311 int ret;
4313 /* Keep the ftrace pointer to the stub */
4314 addr = (unsigned long)ftrace_stub;
4316 local_irq_save(flags);
4317 ftrace_dyn_arch_init(&addr);
4318 local_irq_restore(flags);
4320 /* ftrace_dyn_arch_init places the return code in addr */
4321 if (addr)
4322 goto failed;
4324 count = __stop_mcount_loc - __start_mcount_loc;
4326 ret = ftrace_dyn_table_alloc(count);
4327 if (ret)
4328 goto failed;
4330 last_ftrace_enabled = ftrace_enabled = 1;
4332 ret = ftrace_process_locs(NULL,
4333 __start_mcount_loc,
4334 __stop_mcount_loc);
4336 ret = register_module_notifier(&ftrace_module_enter_nb);
4337 if (ret)
4338 pr_warning("Failed to register trace ftrace module enter notifier\n");
4340 ret = register_module_notifier(&ftrace_module_exit_nb);
4341 if (ret)
4342 pr_warning("Failed to register trace ftrace module exit notifier\n");
4344 set_ftrace_early_filters();
4346 return;
4347 failed:
4348 ftrace_disabled = 1;
4351 #else
4353 static struct ftrace_ops global_ops = {
4354 .func = ftrace_stub,
4355 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4356 INIT_REGEX_LOCK(global_ops)
4359 static int __init ftrace_nodyn_init(void)
4361 ftrace_enabled = 1;
4362 return 0;
4364 core_initcall(ftrace_nodyn_init);
4366 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4367 static inline void ftrace_startup_enable(int command) { }
4368 /* Keep as macros so we do not need to define the commands */
4369 # define ftrace_startup(ops, command) \
4370 ({ \
4371 int ___ret = __register_ftrace_function(ops); \
4372 if (!___ret) \
4373 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4374 ___ret; \
4376 # define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
4378 # define ftrace_startup_sysctl() do { } while (0)
4379 # define ftrace_shutdown_sysctl() do { } while (0)
4381 static inline int
4382 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4384 return 1;
4387 #endif /* CONFIG_DYNAMIC_FTRACE */
4389 static void
4390 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4391 struct ftrace_ops *op, struct pt_regs *regs)
4393 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4394 return;
4397 * Some of the ops may be dynamically allocated,
4398 * they must be freed after a synchronize_sched().
4400 preempt_disable_notrace();
4401 trace_recursion_set(TRACE_CONTROL_BIT);
4404 * Control funcs (perf) use RCU. Only trace if
4405 * RCU is currently active.
4407 if (!rcu_is_watching())
4408 goto out;
4410 do_for_each_ftrace_op(op, ftrace_control_list) {
4411 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4412 !ftrace_function_local_disabled(op) &&
4413 ftrace_ops_test(op, ip, regs))
4414 op->func(ip, parent_ip, op, regs);
4415 } while_for_each_ftrace_op(op);
4416 out:
4417 trace_recursion_clear(TRACE_CONTROL_BIT);
4418 preempt_enable_notrace();
4421 static struct ftrace_ops control_ops = {
4422 .func = ftrace_ops_control_func,
4423 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4424 INIT_REGEX_LOCK(control_ops)
4427 static inline void
4428 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4429 struct ftrace_ops *ignored, struct pt_regs *regs)
4431 struct ftrace_ops *op;
4432 int bit;
4434 if (function_trace_stop)
4435 return;
4437 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4438 if (bit < 0)
4439 return;
4442 * Some of the ops may be dynamically allocated,
4443 * they must be freed after a synchronize_sched().
4445 preempt_disable_notrace();
4446 do_for_each_ftrace_op(op, ftrace_ops_list) {
4447 if (ftrace_ops_test(op, ip, regs))
4448 op->func(ip, parent_ip, op, regs);
4449 } while_for_each_ftrace_op(op);
4450 preempt_enable_notrace();
4451 trace_clear_recursion(bit);
4455 * Some archs only support passing ip and parent_ip. Even though
4456 * the list function ignores the op parameter, we do not want any
4457 * C side effects, where a function is called without the caller
4458 * sending a third parameter.
4459 * Archs are to support both the regs and ftrace_ops at the same time.
4460 * If they support ftrace_ops, it is assumed they support regs.
4461 * If callbacks want to use regs, they must either check for regs
4462 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4463 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4464 * An architecture can pass partial regs with ftrace_ops and still
4465 * set ARCH_SUPPORTS_FTRACE_OPS.
4467 #if ARCH_SUPPORTS_FTRACE_OPS
4468 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4469 struct ftrace_ops *op, struct pt_regs *regs)
4471 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4473 #else
4474 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4476 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4478 #endif
4480 static void clear_ftrace_swapper(void)
4482 struct task_struct *p;
4483 int cpu;
4485 get_online_cpus();
4486 for_each_online_cpu(cpu) {
4487 p = idle_task(cpu);
4488 clear_tsk_trace_trace(p);
4490 put_online_cpus();
4493 static void set_ftrace_swapper(void)
4495 struct task_struct *p;
4496 int cpu;
4498 get_online_cpus();
4499 for_each_online_cpu(cpu) {
4500 p = idle_task(cpu);
4501 set_tsk_trace_trace(p);
4503 put_online_cpus();
4506 static void clear_ftrace_pid(struct pid *pid)
4508 struct task_struct *p;
4510 rcu_read_lock();
4511 do_each_pid_task(pid, PIDTYPE_PID, p) {
4512 clear_tsk_trace_trace(p);
4513 } while_each_pid_task(pid, PIDTYPE_PID, p);
4514 rcu_read_unlock();
4516 put_pid(pid);
4519 static void set_ftrace_pid(struct pid *pid)
4521 struct task_struct *p;
4523 rcu_read_lock();
4524 do_each_pid_task(pid, PIDTYPE_PID, p) {
4525 set_tsk_trace_trace(p);
4526 } while_each_pid_task(pid, PIDTYPE_PID, p);
4527 rcu_read_unlock();
4530 static void clear_ftrace_pid_task(struct pid *pid)
4532 if (pid == ftrace_swapper_pid)
4533 clear_ftrace_swapper();
4534 else
4535 clear_ftrace_pid(pid);
4538 static void set_ftrace_pid_task(struct pid *pid)
4540 if (pid == ftrace_swapper_pid)
4541 set_ftrace_swapper();
4542 else
4543 set_ftrace_pid(pid);
4546 static int ftrace_pid_add(int p)
4548 struct pid *pid;
4549 struct ftrace_pid *fpid;
4550 int ret = -EINVAL;
4552 mutex_lock(&ftrace_lock);
4554 if (!p)
4555 pid = ftrace_swapper_pid;
4556 else
4557 pid = find_get_pid(p);
4559 if (!pid)
4560 goto out;
4562 ret = 0;
4564 list_for_each_entry(fpid, &ftrace_pids, list)
4565 if (fpid->pid == pid)
4566 goto out_put;
4568 ret = -ENOMEM;
4570 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4571 if (!fpid)
4572 goto out_put;
4574 list_add(&fpid->list, &ftrace_pids);
4575 fpid->pid = pid;
4577 set_ftrace_pid_task(pid);
4579 ftrace_update_pid_func();
4580 ftrace_startup_enable(0);
4582 mutex_unlock(&ftrace_lock);
4583 return 0;
4585 out_put:
4586 if (pid != ftrace_swapper_pid)
4587 put_pid(pid);
4589 out:
4590 mutex_unlock(&ftrace_lock);
4591 return ret;
4594 static void ftrace_pid_reset(void)
4596 struct ftrace_pid *fpid, *safe;
4598 mutex_lock(&ftrace_lock);
4599 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4600 struct pid *pid = fpid->pid;
4602 clear_ftrace_pid_task(pid);
4604 list_del(&fpid->list);
4605 kfree(fpid);
4608 ftrace_update_pid_func();
4609 ftrace_startup_enable(0);
4611 mutex_unlock(&ftrace_lock);
4614 static void *fpid_start(struct seq_file *m, loff_t *pos)
4616 mutex_lock(&ftrace_lock);
4618 if (list_empty(&ftrace_pids) && (!*pos))
4619 return (void *) 1;
4621 return seq_list_start(&ftrace_pids, *pos);
4624 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4626 if (v == (void *)1)
4627 return NULL;
4629 return seq_list_next(v, &ftrace_pids, pos);
4632 static void fpid_stop(struct seq_file *m, void *p)
4634 mutex_unlock(&ftrace_lock);
4637 static int fpid_show(struct seq_file *m, void *v)
4639 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4641 if (v == (void *)1) {
4642 seq_printf(m, "no pid\n");
4643 return 0;
4646 if (fpid->pid == ftrace_swapper_pid)
4647 seq_printf(m, "swapper tasks\n");
4648 else
4649 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4651 return 0;
4654 static const struct seq_operations ftrace_pid_sops = {
4655 .start = fpid_start,
4656 .next = fpid_next,
4657 .stop = fpid_stop,
4658 .show = fpid_show,
4661 static int
4662 ftrace_pid_open(struct inode *inode, struct file *file)
4664 int ret = 0;
4666 if ((file->f_mode & FMODE_WRITE) &&
4667 (file->f_flags & O_TRUNC))
4668 ftrace_pid_reset();
4670 if (file->f_mode & FMODE_READ)
4671 ret = seq_open(file, &ftrace_pid_sops);
4673 return ret;
4676 static ssize_t
4677 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4678 size_t cnt, loff_t *ppos)
4680 char buf[64], *tmp;
4681 long val;
4682 int ret;
4684 if (cnt >= sizeof(buf))
4685 return -EINVAL;
4687 if (copy_from_user(&buf, ubuf, cnt))
4688 return -EFAULT;
4690 buf[cnt] = 0;
4693 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4694 * to clean the filter quietly.
4696 tmp = strstrip(buf);
4697 if (strlen(tmp) == 0)
4698 return 1;
4700 ret = kstrtol(tmp, 10, &val);
4701 if (ret < 0)
4702 return ret;
4704 ret = ftrace_pid_add(val);
4706 return ret ? ret : cnt;
4709 static int
4710 ftrace_pid_release(struct inode *inode, struct file *file)
4712 if (file->f_mode & FMODE_READ)
4713 seq_release(inode, file);
4715 return 0;
static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= ftrace_filter_lseek,
	.release	= ftrace_pid_release,
};
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/*
 * Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
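
/*
 * Hedged sketch: one way an in-kernel user might pair the two calls above.
 * The callback name, the ops flags left at their defaults, and the initcall
 * wiring are illustrative assumptions; as the kernel-doc notes, the handler
 * and everything it calls must be notrace or it will recurse.
 */
#if 0	/* example only */
static void notrace my_trace_callback(unsigned long ip,
				      unsigned long parent_ip,
				      struct ftrace_ops *op,
				      struct pt_regs *regs)
{
	/* runs for every traced function; keep this path short and notrace */
}

static struct ftrace_ops my_trace_ops __read_mostly = {
	.func = my_trace_callback,
};

static int __init my_tracer_init(void)
{
	/* takes ftrace_lock and calls ftrace_startup() internally */
	return register_ftrace_function(&my_trace_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_trace_ops);
}
#endif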
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_ops_list != &ftrace_list_end)
			update_ftrace_function();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
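
/*
 * Hedged sketch (userspace, not part of this file): the handler above backs
 * the kernel.ftrace_enabled sysctl, so flipping it from a tool can look like
 * the following.  The proc path is the conventional location and an
 * assumption of this example.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <unistd.h>

static int set_ftrace_enabled(int on)
{
	int fd = open("/proc/sys/kernel/ftrace_enabled", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, on ? "1" : "0", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif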
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static int ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
static void
ftrace_graph_probe_sched_switch(void *ignore,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
/* Just a place holder for function graph */
static struct ftrace_ops fgraph_ops __read_mostly = {
	.func = ftrace_stub,
	.flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
		FTRACE_OPS_FL_RECURSION_SAFE,
};
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}
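
/*
 * Hedged sketch: how an in-tree user (for example, a tracer) might install
 * entry/return hooks with the pair of functions above.  The hook names and
 * bodies are illustrative assumptions; only one graph tracer may be
 * registered at a time, so register_ftrace_graph() returns -EBUSY otherwise.
 */
#if 0	/* example only */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* return nonzero to trace this function, 0 to skip it */
	return 1;
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->calltime and trace->rettime bracket the traced call */
}

static int __init my_graph_init(void)
{
	/* return-hook first, entry-hook second */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph();
}
#endif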
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif