/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before it takes effect.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}
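/*
 * Editor's note, an illustrative sketch (not part of the original file):
 * with CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST set and no pid filtering,
 * a single registered ops is dispatched to directly, while two or more
 * go through ftrace_list_func(), which walks the chain:
 *
 *	static struct ftrace_ops a = { .func = func_a };
 *	static struct ftrace_ops b = { .func = func_b };
 *
 *	__register_ftrace_function(&a); // ftrace_trace_function == func_a
 *	__register_ftrace_function(&b); // ftrace_trace_function == ftrace_list_func
 */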
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}
/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};
#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
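/*
 * Editor's note, an illustrative sketch (not part of the original file):
 * the macro pair expands to a nested for, so a typical walk over every
 * dyn_ftrace record looks like this (pg and rec are the caller's):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;	// 'continue' is fine, 'break' is not
 *	} while_for_each_ftrace_rec();
 */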
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

	if (ftrace_disabled || !start)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if ((rec->ip >= s) && (rec->ip < e) &&
		    !(rec->flags & FTRACE_FL_FREE))
			ftrace_free_rec(rec);
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long ip, fl;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
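/*
 * Editor's note, a summary added for clarity (not part of the original
 * file): with filtering active and enable set, the FILTER/ENABLED bits
 * above decide the action as follows.
 *
 *	FILTER	ENABLED		action
 *	  1	  1		nothing
 *	  0	  0		nothing
 *	  0	  1		clear ENABLED (patch site back to nop)
 *	  1	  0		set ENABLED (patch site to call)
 */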
static void ftrace_replace_code(int enable)
{
	int failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have
		 * failed and those not yet converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else {
				ftrace_bug(failed, rec->ip);
				/* Stop processing */
				return;
			}
		}
	} while_for_each_ftrace_rec();
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, rec->ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}
static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
	FTRACE_ITER_HASH	= (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			hidx;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	iter->flags |= FTRACE_ITER_HASH;

	return t_hash_next(m, p, pos);
}
static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;
	char str[KSYM_SYMBOL_LEN];

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
	seq_printf(m, "%s", str);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	if (!p)
		return t_hash_start(m, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
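/*
 * Editor's note, illustrative examples (not part of the original file)
 * of how ftrace_setup_glob() classifies a pattern:
 *
 *	"sched"		-> MATCH_FULL,        search = "sched"
 *	"sched*"	-> MATCH_FRONT_ONLY,  search = "sched"
 *	"*sched"	-> MATCH_END_ONLY,    search = "sched"
 *	"*sched*"	-> MATCH_MIDDLE_ONLY, search = "sched"
 *	"!sched*"	-> MATCH_FRONT_ONLY,  search = "sched", not = 1
 */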
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	char *ptr;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}
static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

static void ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = ftrace_setup_glob(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	ftrace_match_module_records(func, mod, enable);
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
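/*
 * Editor's note, an illustrative sketch (not part of the original file)
 * of registering a second command the same way; "mycmd" and my_callback
 * are made-up names. Afterwards, "func:mycmd:param" written to
 * set_ftrace_filter would reach my_callback():
 *
 *	static int my_callback(char *func, char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_callback,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */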
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

static int ftrace_probe_registered;
static void __enable_ftrace_function_probe(void)
{
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_probe_ops);
	ftrace_startup(0);
	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_probe_ops);
	ftrace_shutdown(0);
	ftrace_probe_registered = 0;
}
static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
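/*
 * Editor's note, an illustrative sketch (not part of the original file);
 * my_probe_func and my_probe are made-up names. This would arm the probe
 * so my_probe_func() runs whenever a function matching "schedule*" does:
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe = {
 *		.func	= my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("schedule*", &my_probe, NULL);
 */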
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};
static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else {
		int not;

		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}
void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ftrace_match_records(func, len, enable);
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ret = ftrace_process_regex(iter->buffer,
					   iter->buffer_idx, enable);
		if (ret)
			goto out;
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
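/*
 * Editor's note, an illustrative sketch (not part of the original file):
 * restrict tracing to scheduler functions, clearing any previous filter.
 * The buffer must be writable, since ftrace_setup_glob() modifies it:
 *
 *	char glob[] = "sched*";
 *
 *	ftrace_set_filter(glob, strlen(glob), 1);
 */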
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!ftrace_graph_count && !*pos)
		return (void *)1;

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int found = 0;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode regex */
	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
	if (not)
		return -EINVAL;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
			break;

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* ensure it is not already in the array */
			exists = false;
			for (i = 0; i < *idx; i++)
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			if (!exists) {
				array[(*idx)++] = rec->ip;
				found = 1;
			}
		}
	} while_for_each_ftrace_rec();

	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one expression at a time */
	ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
	if (ret)
		goto out;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}
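/*
 * Editor's note, illustrative usage from user space (not part of the
 * original file), assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid   # trace pid 1234
 *	echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid   # trace swapper
 *	echo -1   > /sys/kernel/debug/tracing/set_ftrace_pid   # disable
 */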
static const struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. Once called, ftrace is permanently
 * disabled and cannot be re-enabled.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}
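/*
 * Editor's note, an illustrative sketch (not part of the original file);
 * my_tracer and my_ops are made-up names. Note the notrace annotation
 * required by the comment above:
 *
 *	static void notrace my_tracer(unsigned long ip,
 *				      unsigned long parent_ip)
 *	{
 *		// called for every traced function; ip is the callee,
 *		// parent_ip its caller
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_tracer,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */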
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
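/*
 * Editor's note, an illustrative sketch (not part of the original file);
 * my_entry and my_return are made-up names matching the callback typedefs:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero means trace this function
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 */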
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_lock);
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */