/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and
 * tracing actually stopping.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

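/*
 * Worked example (the sizes are illustrative assumptions; both structures
 * vary by architecture and config): with PAGE_SIZE = 4096, a 16-byte
 * struct ftrace_page header and a 32-byte struct dyn_ftrace,
 * ENTRIES_PER_PAGE evaluates to (4096 - 16) / 32 = 127 records per page.
 */
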
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)		({ 0; })
# define unfreeze_record(rec)		({ 0; })
# define record_frozen(rec)		({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));

		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {
			/* if record is not enabled do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;

			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
					buff[i] = 0;
					break;
				}
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched) {
				if (not)
					rec->flags &= ~flag;
				else
					rec->flags |= flag;
			}
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;

			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int i, j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
				continue;

			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			if (strcmp(str, buffer) == 0) {
				found = 1;
				for (j = 0; j < idx; j++)
					if (array[j] == rec->ip) {
						found = 0;
						break;
					}
				if (found)
					array[idx] = rec->ip;
				goto out;
			}
		}
	}
 out:
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;

		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, (char *)buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	put_pid(*pid);
	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: everything is simply shut off without
 * any synchronization, so it may only be used as a last resort.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */