/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/list.h>
30 #include <asm/ftrace.h>
/*
 * On any internal anomaly, warn once and shut ftrace down hard
 * (ftrace_kill) rather than risk further corrupting kernel text.
 */
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
46 /* ftrace_enabled is a method to turn ftrace on or off */
47 int ftrace_enabled __read_mostly
;
48 static int last_ftrace_enabled
;
50 /* Quick disabling of function tracer. */
51 int function_trace_stop
;
54 * ftrace_disabled is set when an anomaly is discovered.
55 * ftrace_disabled is much stronger than ftrace_enabled.
57 static int ftrace_disabled __read_mostly
;
59 static DEFINE_SPINLOCK(ftrace_lock
);
60 static DEFINE_MUTEX(ftrace_sysctl_lock
);
62 static struct ftrace_ops ftrace_list_end __read_mostly
=
67 static struct ftrace_ops
*ftrace_list __read_mostly
= &ftrace_list_end
;
68 ftrace_func_t ftrace_trace_function __read_mostly
= ftrace_stub
;
69 ftrace_func_t __ftrace_trace_function __read_mostly
= ftrace_stub
;
71 static void ftrace_list_func(unsigned long ip
, unsigned long parent_ip
)
73 struct ftrace_ops
*op
= ftrace_list
;
75 /* in case someone actually ports this to alpha! */
76 read_barrier_depends();
78 while (op
!= &ftrace_list_end
) {
80 read_barrier_depends();
81 op
->func(ip
, parent_ip
);
87 * clear_ftrace_function - reset the ftrace function
89 * This NULLs the ftrace function and in essence stops
90 * tracing. There may be lag
92 void clear_ftrace_function(void)
94 ftrace_trace_function
= ftrace_stub
;
95 __ftrace_trace_function
= ftrace_stub
;
98 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
100 * For those archs that do not test ftrace_trace_stop in their
101 * mcount call site, we need to do it from C.
103 static void ftrace_test_stop_func(unsigned long ip
, unsigned long parent_ip
)
105 if (function_trace_stop
)
108 __ftrace_trace_function(ip
, parent_ip
);
112 static int __register_ftrace_function(struct ftrace_ops
*ops
)
114 /* should not be called from interrupt context */
115 spin_lock(&ftrace_lock
);
117 ops
->next
= ftrace_list
;
119 * We are entering ops into the ftrace_list but another
120 * CPU might be walking that list. We need to make sure
121 * the ops->next pointer is valid before another CPU sees
122 * the ops pointer included into the ftrace_list.
127 if (ftrace_enabled
) {
129 * For one func, simply call it directly.
130 * For more than one func, call the chain.
132 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
133 if (ops
->next
== &ftrace_list_end
)
134 ftrace_trace_function
= ops
->func
;
136 ftrace_trace_function
= ftrace_list_func
;
138 if (ops
->next
== &ftrace_list_end
)
139 __ftrace_trace_function
= ops
->func
;
141 __ftrace_trace_function
= ftrace_list_func
;
142 ftrace_trace_function
= ftrace_test_stop_func
;
146 spin_unlock(&ftrace_lock
);
151 static int __unregister_ftrace_function(struct ftrace_ops
*ops
)
153 struct ftrace_ops
**p
;
156 /* should not be called from interrupt context */
157 spin_lock(&ftrace_lock
);
160 * If we are removing the last function, then simply point
161 * to the ftrace_stub.
163 if (ftrace_list
== ops
&& ops
->next
== &ftrace_list_end
) {
164 ftrace_trace_function
= ftrace_stub
;
165 ftrace_list
= &ftrace_list_end
;
169 for (p
= &ftrace_list
; *p
!= &ftrace_list_end
; p
= &(*p
)->next
)
180 if (ftrace_enabled
) {
181 /* If we only have one func left, then call that directly */
182 if (ftrace_list
->next
== &ftrace_list_end
)
183 ftrace_trace_function
= ftrace_list
->func
;
187 spin_unlock(&ftrace_lock
);
192 #ifdef CONFIG_DYNAMIC_FTRACE
193 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
194 # error Dynamic ftrace depends on MCOUNT_RECORD
198 * Since MCOUNT_ADDR may point to mcount itself, we do not want
199 * to get it confused by reading a reference in the code as we
200 * are parsing on objcopy output of text. Use a variable for
203 static unsigned long mcount_addr
= MCOUNT_ADDR
;
206 FTRACE_ENABLE_CALLS
= (1 << 0),
207 FTRACE_DISABLE_CALLS
= (1 << 1),
208 FTRACE_UPDATE_TRACE_FUNC
= (1 << 2),
209 FTRACE_ENABLE_MCOUNT
= (1 << 3),
210 FTRACE_DISABLE_MCOUNT
= (1 << 4),
213 static int ftrace_filtered
;
215 static LIST_HEAD(ftrace_new_addrs
);
217 static DEFINE_MUTEX(ftrace_regex_lock
);
220 struct ftrace_page
*next
;
222 struct dyn_ftrace records
[];
225 #define ENTRIES_PER_PAGE \
226 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
228 /* estimate from running different kernels */
229 #define NR_TO_INIT 10000
231 static struct ftrace_page
*ftrace_pages_start
;
232 static struct ftrace_page
*ftrace_pages
;
234 static struct dyn_ftrace
*ftrace_free_records
;
#ifdef CONFIG_KPROBES

static int frozen_record_count;

/* Mark a record's mcount site as owned by a kprobe; don't patch it. */
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
267 static void ftrace_free_rec(struct dyn_ftrace
*rec
)
269 rec
->ip
= (unsigned long)ftrace_free_records
;
270 ftrace_free_records
= rec
;
271 rec
->flags
|= FTRACE_FL_FREE
;
274 void ftrace_release(void *start
, unsigned long size
)
276 struct dyn_ftrace
*rec
;
277 struct ftrace_page
*pg
;
278 unsigned long s
= (unsigned long)start
;
279 unsigned long e
= s
+ size
;
282 if (ftrace_disabled
|| !start
)
285 /* should not be called from interrupt context */
286 spin_lock(&ftrace_lock
);
288 for (pg
= ftrace_pages_start
; pg
; pg
= pg
->next
) {
289 for (i
= 0; i
< pg
->index
; i
++) {
290 rec
= &pg
->records
[i
];
292 if ((rec
->ip
>= s
) && (rec
->ip
< e
))
293 ftrace_free_rec(rec
);
296 spin_unlock(&ftrace_lock
);
299 static struct dyn_ftrace
*ftrace_alloc_dyn_node(unsigned long ip
)
301 struct dyn_ftrace
*rec
;
303 /* First check for freed records */
304 if (ftrace_free_records
) {
305 rec
= ftrace_free_records
;
307 if (unlikely(!(rec
->flags
& FTRACE_FL_FREE
))) {
308 FTRACE_WARN_ON_ONCE(1);
309 ftrace_free_records
= NULL
;
313 ftrace_free_records
= (void *)rec
->ip
;
314 memset(rec
, 0, sizeof(*rec
));
318 if (ftrace_pages
->index
== ENTRIES_PER_PAGE
) {
319 if (!ftrace_pages
->next
) {
320 /* allocate another page */
322 (void *)get_zeroed_page(GFP_KERNEL
);
323 if (!ftrace_pages
->next
)
326 ftrace_pages
= ftrace_pages
->next
;
329 return &ftrace_pages
->records
[ftrace_pages
->index
++];
332 static struct dyn_ftrace
*
333 ftrace_record_ip(unsigned long ip
)
335 struct dyn_ftrace
*rec
;
337 if (!ftrace_enabled
|| ftrace_disabled
)
340 rec
= ftrace_alloc_dyn_node(ip
);
346 list_add(&rec
->list
, &ftrace_new_addrs
);
351 #define FTRACE_ADDR ((long)(ftrace_caller))
354 __ftrace_replace_code(struct dyn_ftrace
*rec
,
355 unsigned char *old
, unsigned char *new, int enable
)
357 unsigned long ip
, fl
;
361 if (ftrace_filtered
&& enable
) {
363 * If filtering is on:
365 * If this record is set to be filtered and
366 * is enabled then do nothing.
368 * If this record is set to be filtered and
369 * it is not enabled, enable it.
371 * If this record is not set to be filtered
372 * and it is not enabled do nothing.
374 * If this record is set not to trace then
377 * If this record is set not to trace and
378 * it is enabled then disable it.
380 * If this record is not set to be filtered and
381 * it is enabled, disable it.
384 fl
= rec
->flags
& (FTRACE_FL_FILTER
| FTRACE_FL_NOTRACE
|
387 if ((fl
== (FTRACE_FL_FILTER
| FTRACE_FL_ENABLED
)) ||
388 (fl
== (FTRACE_FL_FILTER
| FTRACE_FL_NOTRACE
)) ||
389 !fl
|| (fl
== FTRACE_FL_NOTRACE
))
393 * If it is enabled disable it,
394 * otherwise enable it!
396 if (fl
& FTRACE_FL_ENABLED
) {
397 /* swap new and old */
399 old
= ftrace_call_replace(ip
, FTRACE_ADDR
);
400 rec
->flags
&= ~FTRACE_FL_ENABLED
;
402 new = ftrace_call_replace(ip
, FTRACE_ADDR
);
403 rec
->flags
|= FTRACE_FL_ENABLED
;
409 * If this record is set not to trace and is
410 * not enabled, do nothing.
412 fl
= rec
->flags
& (FTRACE_FL_NOTRACE
| FTRACE_FL_ENABLED
);
413 if (fl
== FTRACE_FL_NOTRACE
)
416 new = ftrace_call_replace(ip
, FTRACE_ADDR
);
418 old
= ftrace_call_replace(ip
, FTRACE_ADDR
);
421 if (rec
->flags
& FTRACE_FL_ENABLED
)
423 rec
->flags
|= FTRACE_FL_ENABLED
;
425 if (!(rec
->flags
& FTRACE_FL_ENABLED
))
427 rec
->flags
&= ~FTRACE_FL_ENABLED
;
431 return ftrace_modify_code(ip
, old
, new);
434 static void ftrace_replace_code(int enable
)
437 unsigned char *new = NULL
, *old
= NULL
;
438 struct dyn_ftrace
*rec
;
439 struct ftrace_page
*pg
;
442 old
= ftrace_nop_replace();
444 new = ftrace_nop_replace();
446 for (pg
= ftrace_pages_start
; pg
; pg
= pg
->next
) {
447 for (i
= 0; i
< pg
->index
; i
++) {
448 rec
= &pg
->records
[i
];
450 /* don't modify code that has already faulted */
451 if (rec
->flags
& FTRACE_FL_FAILED
)
454 /* ignore updates to this record's mcount site */
455 if (get_kprobe((void *)rec
->ip
)) {
459 unfreeze_record(rec
);
462 failed
= __ftrace_replace_code(rec
, old
, new, enable
);
463 if (failed
&& (rec
->flags
& FTRACE_FL_CONVERTED
)) {
464 rec
->flags
|= FTRACE_FL_FAILED
;
465 if ((system_state
== SYSTEM_BOOTING
) ||
466 !core_kernel_text(rec
->ip
)) {
467 ftrace_free_rec(rec
);
474 static void print_ip_ins(const char *fmt
, unsigned char *p
)
478 printk(KERN_CONT
"%s", fmt
);
480 for (i
= 0; i
< MCOUNT_INSN_SIZE
; i
++)
481 printk(KERN_CONT
"%s%02x", i
? ":" : "", p
[i
]);
485 ftrace_code_disable(struct dyn_ftrace
*rec
)
488 unsigned char *nop
, *call
;
493 nop
= ftrace_nop_replace();
494 call
= ftrace_call_replace(ip
, mcount_addr
);
496 ret
= ftrace_modify_code(ip
, call
, nop
);
500 FTRACE_WARN_ON_ONCE(1);
501 pr_info("ftrace faulted on modifying ");
505 FTRACE_WARN_ON_ONCE(1);
506 pr_info("ftrace failed to modify ");
508 print_ip_ins(" expected: ", call
);
509 print_ip_ins(" actual: ", (unsigned char *)ip
);
510 print_ip_ins(" replace: ", nop
);
511 printk(KERN_CONT
"\n");
514 FTRACE_WARN_ON_ONCE(1);
515 pr_info("ftrace faulted on writing ");
519 FTRACE_WARN_ON_ONCE(1);
520 pr_info("ftrace faulted on unknown error ");
524 rec
->flags
|= FTRACE_FL_FAILED
;
530 static int __ftrace_modify_code(void *data
)
534 if (*command
& FTRACE_ENABLE_CALLS
)
535 ftrace_replace_code(1);
536 else if (*command
& FTRACE_DISABLE_CALLS
)
537 ftrace_replace_code(0);
539 if (*command
& FTRACE_UPDATE_TRACE_FUNC
)
540 ftrace_update_ftrace_func(ftrace_trace_function
);
545 static void ftrace_run_update_code(int command
)
547 stop_machine(__ftrace_modify_code
, &command
, NULL
);
550 static ftrace_func_t saved_ftrace_func
;
551 static int ftrace_start_up
;
552 static DEFINE_MUTEX(ftrace_start_lock
);
554 static void ftrace_startup(void)
558 if (unlikely(ftrace_disabled
))
561 mutex_lock(&ftrace_start_lock
);
563 if (ftrace_start_up
== 1)
564 command
|= FTRACE_ENABLE_CALLS
;
566 if (saved_ftrace_func
!= ftrace_trace_function
) {
567 saved_ftrace_func
= ftrace_trace_function
;
568 command
|= FTRACE_UPDATE_TRACE_FUNC
;
571 if (!command
|| !ftrace_enabled
)
574 ftrace_run_update_code(command
);
576 mutex_unlock(&ftrace_start_lock
);
579 static void ftrace_shutdown(void)
583 if (unlikely(ftrace_disabled
))
586 mutex_lock(&ftrace_start_lock
);
588 if (!ftrace_start_up
)
589 command
|= FTRACE_DISABLE_CALLS
;
591 if (saved_ftrace_func
!= ftrace_trace_function
) {
592 saved_ftrace_func
= ftrace_trace_function
;
593 command
|= FTRACE_UPDATE_TRACE_FUNC
;
596 if (!command
|| !ftrace_enabled
)
599 ftrace_run_update_code(command
);
601 mutex_unlock(&ftrace_start_lock
);
604 static void ftrace_startup_sysctl(void)
606 int command
= FTRACE_ENABLE_MCOUNT
;
608 if (unlikely(ftrace_disabled
))
611 mutex_lock(&ftrace_start_lock
);
612 /* Force update next time */
613 saved_ftrace_func
= NULL
;
614 /* ftrace_start_up is true if we want ftrace running */
616 command
|= FTRACE_ENABLE_CALLS
;
618 ftrace_run_update_code(command
);
619 mutex_unlock(&ftrace_start_lock
);
622 static void ftrace_shutdown_sysctl(void)
624 int command
= FTRACE_DISABLE_MCOUNT
;
626 if (unlikely(ftrace_disabled
))
629 mutex_lock(&ftrace_start_lock
);
630 /* ftrace_start_up is true if ftrace is running */
632 command
|= FTRACE_DISABLE_CALLS
;
634 ftrace_run_update_code(command
);
635 mutex_unlock(&ftrace_start_lock
);
638 static cycle_t ftrace_update_time
;
639 static unsigned long ftrace_update_cnt
;
640 unsigned long ftrace_update_tot_cnt
;
642 static int ftrace_update_code(void)
644 struct dyn_ftrace
*p
, *t
;
647 start
= ftrace_now(raw_smp_processor_id());
648 ftrace_update_cnt
= 0;
650 list_for_each_entry_safe(p
, t
, &ftrace_new_addrs
, list
) {
652 /* If something went wrong, bail without enabling anything */
653 if (unlikely(ftrace_disabled
))
656 list_del_init(&p
->list
);
658 /* convert record (i.e, patch mcount-call with NOP) */
659 if (ftrace_code_disable(p
)) {
660 p
->flags
|= FTRACE_FL_CONVERTED
;
666 stop
= ftrace_now(raw_smp_processor_id());
667 ftrace_update_time
= stop
- start
;
668 ftrace_update_tot_cnt
+= ftrace_update_cnt
;
673 static int __init
ftrace_dyn_table_alloc(unsigned long num_to_init
)
675 struct ftrace_page
*pg
;
679 /* allocate a few pages */
680 ftrace_pages_start
= (void *)get_zeroed_page(GFP_KERNEL
);
681 if (!ftrace_pages_start
)
685 * Allocate a few more pages.
687 * TODO: have some parser search vmlinux before
688 * final linking to find all calls to ftrace.
690 * a) know how many pages to allocate.
692 * b) set up the table then.
694 * The dynamic code is still necessary for
698 pg
= ftrace_pages
= ftrace_pages_start
;
700 cnt
= num_to_init
/ ENTRIES_PER_PAGE
;
701 pr_info("ftrace: allocating %ld entries in %d pages\n",
704 for (i
= 0; i
< cnt
; i
++) {
705 pg
->next
= (void *)get_zeroed_page(GFP_KERNEL
);
707 /* If we fail, we'll try later anyway */
718 FTRACE_ITER_FILTER
= (1 << 0),
719 FTRACE_ITER_CONT
= (1 << 1),
720 FTRACE_ITER_NOTRACE
= (1 << 2),
721 FTRACE_ITER_FAILURES
= (1 << 3),
724 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
726 struct ftrace_iterator
{
728 struct ftrace_page
*pg
;
731 unsigned char buffer
[FTRACE_BUFF_MAX
+1];
737 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
739 struct ftrace_iterator
*iter
= m
->private;
740 struct dyn_ftrace
*rec
= NULL
;
744 /* should not be called from interrupt context */
745 spin_lock(&ftrace_lock
);
747 if (iter
->idx
>= iter
->pg
->index
) {
748 if (iter
->pg
->next
) {
749 iter
->pg
= iter
->pg
->next
;
754 rec
= &iter
->pg
->records
[iter
->idx
++];
755 if ((rec
->flags
& FTRACE_FL_FREE
) ||
757 (!(iter
->flags
& FTRACE_ITER_FAILURES
) &&
758 (rec
->flags
& FTRACE_FL_FAILED
)) ||
760 ((iter
->flags
& FTRACE_ITER_FAILURES
) &&
761 !(rec
->flags
& FTRACE_FL_FAILED
)) ||
763 ((iter
->flags
& FTRACE_ITER_FILTER
) &&
764 !(rec
->flags
& FTRACE_FL_FILTER
)) ||
766 ((iter
->flags
& FTRACE_ITER_NOTRACE
) &&
767 !(rec
->flags
& FTRACE_FL_NOTRACE
))) {
772 spin_unlock(&ftrace_lock
);
779 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
781 struct ftrace_iterator
*iter
= m
->private;
785 if (*pos
!= iter
->pos
) {
786 for (p
= t_next(m
, p
, &l
); p
&& l
< *pos
; p
= t_next(m
, p
, &l
))
790 p
= t_next(m
, p
, &l
);
/* seq_file .stop: nothing to release; locking is per-t_next call. */
static void t_stop(struct seq_file *m, void *p)
{
}
800 static int t_show(struct seq_file
*m
, void *v
)
802 struct dyn_ftrace
*rec
= v
;
803 char str
[KSYM_SYMBOL_LEN
];
808 kallsyms_lookup(rec
->ip
, NULL
, NULL
, NULL
, str
);
810 seq_printf(m
, "%s\n", str
);
815 static struct seq_operations show_ftrace_seq_ops
= {
823 ftrace_avail_open(struct inode
*inode
, struct file
*file
)
825 struct ftrace_iterator
*iter
;
828 if (unlikely(ftrace_disabled
))
831 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
835 iter
->pg
= ftrace_pages_start
;
838 ret
= seq_open(file
, &show_ftrace_seq_ops
);
840 struct seq_file
*m
= file
->private_data
;
850 int ftrace_avail_release(struct inode
*inode
, struct file
*file
)
852 struct seq_file
*m
= (struct seq_file
*)file
->private_data
;
853 struct ftrace_iterator
*iter
= m
->private;
855 seq_release(inode
, file
);
862 ftrace_failures_open(struct inode
*inode
, struct file
*file
)
866 struct ftrace_iterator
*iter
;
868 ret
= ftrace_avail_open(inode
, file
);
870 m
= (struct seq_file
*)file
->private_data
;
871 iter
= (struct ftrace_iterator
*)m
->private;
872 iter
->flags
= FTRACE_ITER_FAILURES
;
879 static void ftrace_filter_reset(int enable
)
881 struct ftrace_page
*pg
;
882 struct dyn_ftrace
*rec
;
883 unsigned long type
= enable
? FTRACE_FL_FILTER
: FTRACE_FL_NOTRACE
;
886 /* should not be called from interrupt context */
887 spin_lock(&ftrace_lock
);
890 pg
= ftrace_pages_start
;
892 for (i
= 0; i
< pg
->index
; i
++) {
893 rec
= &pg
->records
[i
];
894 if (rec
->flags
& FTRACE_FL_FAILED
)
900 spin_unlock(&ftrace_lock
);
904 ftrace_regex_open(struct inode
*inode
, struct file
*file
, int enable
)
906 struct ftrace_iterator
*iter
;
909 if (unlikely(ftrace_disabled
))
912 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
916 mutex_lock(&ftrace_regex_lock
);
917 if ((file
->f_mode
& FMODE_WRITE
) &&
918 !(file
->f_flags
& O_APPEND
))
919 ftrace_filter_reset(enable
);
921 if (file
->f_mode
& FMODE_READ
) {
922 iter
->pg
= ftrace_pages_start
;
924 iter
->flags
= enable
? FTRACE_ITER_FILTER
:
927 ret
= seq_open(file
, &show_ftrace_seq_ops
);
929 struct seq_file
*m
= file
->private_data
;
934 file
->private_data
= iter
;
935 mutex_unlock(&ftrace_regex_lock
);
/* Open set_ftrace_filter. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}
/* Open set_ftrace_notrace. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
953 ftrace_regex_read(struct file
*file
, char __user
*ubuf
,
954 size_t cnt
, loff_t
*ppos
)
956 if (file
->f_mode
& FMODE_READ
)
957 return seq_read(file
, ubuf
, cnt
, ppos
);
963 ftrace_regex_lseek(struct file
*file
, loff_t offset
, int origin
)
967 if (file
->f_mode
& FMODE_READ
)
968 ret
= seq_lseek(file
, offset
, origin
);
970 file
->f_pos
= ret
= 1;
983 ftrace_match(unsigned char *buff
, int len
, int enable
)
985 char str
[KSYM_SYMBOL_LEN
];
987 struct ftrace_page
*pg
;
988 struct dyn_ftrace
*rec
;
989 int type
= MATCH_FULL
;
990 unsigned long flag
= enable
? FTRACE_FL_FILTER
: FTRACE_FL_NOTRACE
;
991 unsigned i
, match
= 0, search_len
= 0;
993 for (i
= 0; i
< len
; i
++) {
994 if (buff
[i
] == '*') {
996 search
= buff
+ i
+ 1;
997 type
= MATCH_END_ONLY
;
998 search_len
= len
- (i
+ 1);
1000 if (type
== MATCH_END_ONLY
) {
1001 type
= MATCH_MIDDLE_ONLY
;
1004 type
= MATCH_FRONT_ONLY
;
1012 /* should not be called from interrupt context */
1013 spin_lock(&ftrace_lock
);
1015 ftrace_filtered
= 1;
1016 pg
= ftrace_pages_start
;
1018 for (i
= 0; i
< pg
->index
; i
++) {
1022 rec
= &pg
->records
[i
];
1023 if (rec
->flags
& FTRACE_FL_FAILED
)
1025 kallsyms_lookup(rec
->ip
, NULL
, NULL
, NULL
, str
);
1028 if (strcmp(str
, buff
) == 0)
1031 case MATCH_FRONT_ONLY
:
1032 if (memcmp(str
, buff
, match
) == 0)
1035 case MATCH_MIDDLE_ONLY
:
1036 if (strstr(str
, search
))
1039 case MATCH_END_ONLY
:
1040 ptr
= strstr(str
, search
);
1041 if (ptr
&& (ptr
[search_len
] == 0))
1050 spin_unlock(&ftrace_lock
);
1054 ftrace_regex_write(struct file
*file
, const char __user
*ubuf
,
1055 size_t cnt
, loff_t
*ppos
, int enable
)
1057 struct ftrace_iterator
*iter
;
1062 if (!cnt
|| cnt
< 0)
1065 mutex_lock(&ftrace_regex_lock
);
1067 if (file
->f_mode
& FMODE_READ
) {
1068 struct seq_file
*m
= file
->private_data
;
1071 iter
= file
->private_data
;
1074 iter
->flags
&= ~FTRACE_ITER_CONT
;
1075 iter
->buffer_idx
= 0;
1078 ret
= get_user(ch
, ubuf
++);
1084 if (!(iter
->flags
& ~FTRACE_ITER_CONT
)) {
1085 /* skip white space */
1086 while (cnt
&& isspace(ch
)) {
1087 ret
= get_user(ch
, ubuf
++);
1095 file
->f_pos
+= read
;
1100 iter
->buffer_idx
= 0;
1103 while (cnt
&& !isspace(ch
)) {
1104 if (iter
->buffer_idx
< FTRACE_BUFF_MAX
)
1105 iter
->buffer
[iter
->buffer_idx
++] = ch
;
1110 ret
= get_user(ch
, ubuf
++);
1119 iter
->buffer
[iter
->buffer_idx
] = 0;
1120 ftrace_match(iter
->buffer
, iter
->buffer_idx
, enable
);
1121 iter
->buffer_idx
= 0;
1123 iter
->flags
|= FTRACE_ITER_CONT
;
1126 file
->f_pos
+= read
;
1130 mutex_unlock(&ftrace_regex_lock
);
1136 ftrace_filter_write(struct file
*file
, const char __user
*ubuf
,
1137 size_t cnt
, loff_t
*ppos
)
1139 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 1);
1143 ftrace_notrace_write(struct file
*file
, const char __user
*ubuf
,
1144 size_t cnt
, loff_t
*ppos
)
1146 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 0);
1150 ftrace_set_regex(unsigned char *buf
, int len
, int reset
, int enable
)
1152 if (unlikely(ftrace_disabled
))
1155 mutex_lock(&ftrace_regex_lock
);
1157 ftrace_filter_reset(enable
);
1159 ftrace_match(buf
, len
, enable
);
1160 mutex_unlock(&ftrace_regex_lock
);
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1193 ftrace_regex_release(struct inode
*inode
, struct file
*file
, int enable
)
1195 struct seq_file
*m
= (struct seq_file
*)file
->private_data
;
1196 struct ftrace_iterator
*iter
;
1198 mutex_lock(&ftrace_regex_lock
);
1199 if (file
->f_mode
& FMODE_READ
) {
1202 seq_release(inode
, file
);
1204 iter
= file
->private_data
;
1206 if (iter
->buffer_idx
) {
1208 iter
->buffer
[iter
->buffer_idx
] = 0;
1209 ftrace_match(iter
->buffer
, iter
->buffer_idx
, enable
);
1212 mutex_lock(&ftrace_sysctl_lock
);
1213 mutex_lock(&ftrace_start_lock
);
1214 if (iter
->filtered
&& ftrace_start_up
&& ftrace_enabled
)
1215 ftrace_run_update_code(FTRACE_ENABLE_CALLS
);
1216 mutex_unlock(&ftrace_start_lock
);
1217 mutex_unlock(&ftrace_sysctl_lock
);
1220 mutex_unlock(&ftrace_regex_lock
);
/* Release handler for set_ftrace_filter. */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}
/* Release handler for set_ftrace_notrace. */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
1236 static struct file_operations ftrace_avail_fops
= {
1237 .open
= ftrace_avail_open
,
1239 .llseek
= seq_lseek
,
1240 .release
= ftrace_avail_release
,
1243 static struct file_operations ftrace_failures_fops
= {
1244 .open
= ftrace_failures_open
,
1246 .llseek
= seq_lseek
,
1247 .release
= ftrace_avail_release
,
1250 static struct file_operations ftrace_filter_fops
= {
1251 .open
= ftrace_filter_open
,
1252 .read
= ftrace_regex_read
,
1253 .write
= ftrace_filter_write
,
1254 .llseek
= ftrace_regex_lseek
,
1255 .release
= ftrace_filter_release
,
1258 static struct file_operations ftrace_notrace_fops
= {
1259 .open
= ftrace_notrace_open
,
1260 .read
= ftrace_regex_read
,
1261 .write
= ftrace_notrace_write
,
1262 .llseek
= ftrace_regex_lseek
,
1263 .release
= ftrace_notrace_release
,
1266 static __init
int ftrace_init_debugfs(void)
1268 struct dentry
*d_tracer
;
1269 struct dentry
*entry
;
1271 d_tracer
= tracing_init_dentry();
1273 entry
= debugfs_create_file("available_filter_functions", 0444,
1274 d_tracer
, NULL
, &ftrace_avail_fops
);
1276 pr_warning("Could not create debugfs "
1277 "'available_filter_functions' entry\n");
1279 entry
= debugfs_create_file("failures", 0444,
1280 d_tracer
, NULL
, &ftrace_failures_fops
);
1282 pr_warning("Could not create debugfs 'failures' entry\n");
1284 entry
= debugfs_create_file("set_ftrace_filter", 0644, d_tracer
,
1285 NULL
, &ftrace_filter_fops
);
1287 pr_warning("Could not create debugfs "
1288 "'set_ftrace_filter' entry\n");
1290 entry
= debugfs_create_file("set_ftrace_notrace", 0644, d_tracer
,
1291 NULL
, &ftrace_notrace_fops
);
1293 pr_warning("Could not create debugfs "
1294 "'set_ftrace_notrace' entry\n");
1299 fs_initcall(ftrace_init_debugfs
);
1301 static int ftrace_convert_nops(unsigned long *start
,
1306 unsigned long flags
;
1308 mutex_lock(&ftrace_start_lock
);
1311 addr
= ftrace_call_adjust(*p
++);
1312 ftrace_record_ip(addr
);
1315 /* disable interrupts to prevent kstop machine */
1316 local_irq_save(flags
);
1317 ftrace_update_code();
1318 local_irq_restore(flags
);
1319 mutex_unlock(&ftrace_start_lock
);
1324 void ftrace_init_module(unsigned long *start
, unsigned long *end
)
1326 if (ftrace_disabled
|| start
== end
)
1328 ftrace_convert_nops(start
, end
);
1331 extern unsigned long __start_mcount_loc
[];
1332 extern unsigned long __stop_mcount_loc
[];
1334 void __init
ftrace_init(void)
1336 unsigned long count
, addr
, flags
;
1339 /* Keep the ftrace pointer to the stub */
1340 addr
= (unsigned long)ftrace_stub
;
1342 local_irq_save(flags
);
1343 ftrace_dyn_arch_init(&addr
);
1344 local_irq_restore(flags
);
1346 /* ftrace_dyn_arch_init places the return code in addr */
1350 count
= __stop_mcount_loc
- __start_mcount_loc
;
1352 ret
= ftrace_dyn_table_alloc(count
);
1356 last_ftrace_enabled
= ftrace_enabled
= 1;
1358 ret
= ftrace_convert_nops(__start_mcount_loc
,
1363 ftrace_disabled
= 1;
1368 static int __init
ftrace_nodyn_init(void)
1373 device_initcall(ftrace_nodyn_init
);
1375 # define ftrace_startup() do { } while (0)
1376 # define ftrace_shutdown() do { } while (0)
1377 # define ftrace_startup_sysctl() do { } while (0)
1378 # define ftrace_shutdown_sysctl() do { } while (0)
1379 #endif /* CONFIG_DYNAMIC_FTRACE */
1382 * ftrace_kill - kill ftrace
1384 * This function should be used by panic code. It stops ftrace
1385 * but in a not so nice way. If you need to simply kill ftrace
1386 * from a non-atomic section, use ftrace_kill.
1388 void ftrace_kill(void)
1390 ftrace_disabled
= 1;
1392 clear_ftrace_function();
1396 * register_ftrace_function - register a function for profiling
1397 * @ops - ops structure that holds the function for profiling.
1399 * Register a function to be called by all functions in the
1402 * Note: @ops->func and all the functions it calls must be labeled
1403 * with "notrace", otherwise it will go into a
1406 int register_ftrace_function(struct ftrace_ops
*ops
)
1410 if (unlikely(ftrace_disabled
))
1413 mutex_lock(&ftrace_sysctl_lock
);
1414 ret
= __register_ftrace_function(ops
);
1416 mutex_unlock(&ftrace_sysctl_lock
);
1422 * unregister_ftrace_function - unresgister a function for profiling.
1423 * @ops - ops structure that holds the function to unregister
1425 * Unregister a function that was added to be called by ftrace profiling.
1427 int unregister_ftrace_function(struct ftrace_ops
*ops
)
1431 mutex_lock(&ftrace_sysctl_lock
);
1432 ret
= __unregister_ftrace_function(ops
);
1434 mutex_unlock(&ftrace_sysctl_lock
);
1440 ftrace_enable_sysctl(struct ctl_table
*table
, int write
,
1441 struct file
*file
, void __user
*buffer
, size_t *lenp
,
1446 if (unlikely(ftrace_disabled
))
1449 mutex_lock(&ftrace_sysctl_lock
);
1451 ret
= proc_dointvec(table
, write
, file
, buffer
, lenp
, ppos
);
1453 if (ret
|| !write
|| (last_ftrace_enabled
== ftrace_enabled
))
1456 last_ftrace_enabled
= ftrace_enabled
;
1458 if (ftrace_enabled
) {
1460 ftrace_startup_sysctl();
1462 /* we are starting ftrace again */
1463 if (ftrace_list
!= &ftrace_list_end
) {
1464 if (ftrace_list
->next
== &ftrace_list_end
)
1465 ftrace_trace_function
= ftrace_list
->func
;
1467 ftrace_trace_function
= ftrace_list_func
;
1471 /* stopping ftrace calls (just send to ftrace_stub) */
1472 ftrace_trace_function
= ftrace_stub
;
1474 ftrace_shutdown_sysctl();
1478 mutex_unlock(&ftrace_sysctl_lock
);
#ifdef CONFIG_FUNCTION_RET_TRACER
/* Hook called on function return; defaults to the do-nothing stub. */
trace_function_return_t ftrace_function_return =
			(trace_function_return_t)ftrace_stub;

/* Install @func as the function-return trace hook. */
void register_ftrace_return(trace_function_return_t func)
{
	ftrace_function_return = func;
}

/* Restore the stub as the function-return trace hook. */
void unregister_ftrace_return(void)
{
	ftrace_function_return = (trace_function_return_t)ftrace_stub;
}
#endif