/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
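/*
 * Added note: the barriers above pair with the smp_wmb() in
 * __register_ftrace_function() below. The writer publishes ops->next
 * before linking ops into ftrace_list, so a reader walking the list
 * without a lock always sees a valid ->next pointer.
 */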
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the moment
 * all callers stop invoking the tracer.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};
static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
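/*
 * Added note: a rough worked example, assuming a 4096-byte page, a
 * 16-byte struct ftrace_page header and a 32-byte struct dyn_ftrace
 * (real sizes depend on the architecture and config):
 * (4096 - 16) / 32 = 127 records per page.
 */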
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
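/*
 * Added note: freed records form a singly linked free list threaded
 * through the ->ip field itself; ftrace_alloc_dyn_node() below pops
 * entries off this list and relies on FTRACE_FL_FREE to detect
 * corruption of the chain.
 */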
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {
			/* if record is not enabled do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
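/*
 * Added summary of the transitions implemented above:
 *
 *	NOTRACE set:		record always ends up disabled
 *	filtering && enable:	record ends up enabled iff FILTER is set
 *	otherwise:		follow the enable argument directly
 *
 * The final ENABLED flag decides whether the call site becomes a call
 * to ftrace_caller (ftrace_make_call) or a nop (ftrace_make_nop).
 */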
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
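/*
 * Added note: stop_machine() runs __ftrace_modify_code() while every
 * other CPU spins, so the mcount call sites can be rewritten without
 * another CPU executing the instructions being patched.
 */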
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched) {
				if (not)
					rec->flags &= ~flag;
				else
					rec->flags |= flag;
			}
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
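/*
 * Added examples of the glob forms parsed above (each compared against
 * the kallsyms name of every record):
 *
 *	"sched_fork"	MATCH_FULL		exact symbol name
 *	"sched_*"	MATCH_FRONT_ONLY	prefix match
 *	"*_fork"	MATCH_END_ONLY		suffix match
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring match
 *	"!sched_*"	clears the flag instead of setting it
 */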
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
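/*
 * Added usage sketch (illustrative): to trace only scheduler functions,
 * dropping any previously set filter:
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 *
 * The rough user-space equivalent, assuming debugfs is mounted at
 * /sys/kernel/debug, is:
 *
 *	echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 */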
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}
static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int i, j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
				continue;

			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			if (strcmp(str, buffer) == 0) {
				found = 1;
				for (j = 0; j < idx; j++)
					if (array[j] == rec->ip) {
						found = 0;
						break;
					}
				if (found)
					array[idx] = rec->ip;
				break;
			}
		}
	}
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}
static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}
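/*
 * Added usage sketch (illustrative, paths may vary): from user space,
 * assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid   # trace pid 1234
 *	echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid   # trace the idle tasks
 *	echo -1   > /sys/kernel/debug/tracing/set_ftrace_pid   # trace all tasks again
 */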
static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
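/*
 * Added usage sketch (illustrative, not part of this file; my_callback
 * and my_ops are hypothetical names). A minimal caller looks like:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *		// must not call any traceable function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */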
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
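/*
 * Added note: alloc_retstack_tasklist() hands out stacks in batches of
 * FTRACE_RETSTACK_ALLOC_SIZE and returns -EAGAIN when a batch runs out
 * while unstacked tasks remain, so the loop above simply retries until
 * a whole pass completes without exhausting a batch.
 */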
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
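/*
 * Added usage sketch (illustrative; my_entry and my_return are
 * hypothetical names). A graph tracer registers one callback for
 * function entry and one for function return:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero means: trace this function
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// entry/return times in trace bracket the call
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 */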
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_sysctl_lock);
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */