/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between the time this is called
 * and the time the callback actually stops being invoked.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, that is, when the callers record themselves
 * rather than being recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
#endif
/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

static struct task_struct *ftraced_task;
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
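/*
 * Worked example (sizes illustrative, not exact): with 4096-byte pages,
 * a 16-byte struct ftrace_page header and a 32-byte struct dyn_ftrace,
 * ENTRIES_PER_PAGE works out to (4096 - 16) / 32 = 127 records per page.
 */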
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}
static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}
static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
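/*
 * Freed records form a singly linked free list: ftrace_free_rec()
 * reuses rec->ip as the "next" pointer (the record no longer maps a
 * real call site once FTRACE_FL_FREE is set), and
 * ftrace_alloc_dyn_node() below pops entries back off that list
 * before carving new ones out of ftrace_pages.
 */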
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));

		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	ftrace_hash_lock(flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	ftrace_hash_unlock(flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}
static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		switch (failed) {
		case 1:
			WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case 2:
			WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
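/*
 * All code modification above runs via stop_machine(), so every other
 * CPU is parked while the kernel text is patched; that is what makes
 * it safe to rewrite live instructions without stopping the traced
 * functions one by one.
 */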
void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}
static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bid for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}
static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
901 /* If we fail, we'll try later anyway */
912 FTRACE_ITER_FILTER
= (1 << 0),
913 FTRACE_ITER_CONT
= (1 << 1),
914 FTRACE_ITER_NOTRACE
= (1 << 2),
915 FTRACE_ITER_FAILURES
= (1 << 3),
918 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}
static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
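/*
 * Glob examples for the matcher above (the patterns are hypothetical):
 *
 *	"schedule"	MATCH_FULL        - the exact symbol only
 *	"sched*"	MATCH_FRONT_ONLY  - symbols starting with "sched"
 *	"*timer"	MATCH_END_ONLY    - symbols ending with "timer"
 *	"*lock*"	MATCH_MIDDLE_ONLY - symbols containing "lock"
 */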
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
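/*
 * Minimal usage sketch (caller and pattern are hypothetical): to trace
 * only the scheduler family of functions and drop any previously set
 * filters, a tracer would call:
 *
 *	ftrace_set_filter("sched*", strlen("sched*"), 1);
 */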
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
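/*
 * From userspace (a sketch, assuming debugfs is mounted at
 * /sys/kernel/debug), these files are driven like so:
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sched*'  > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo sys_read  > /sys/kernel/debug/tracing/set_ftrace_notrace
 *	echo disable   > /sys/kernel/debug/tracing/ftraced_enabled
 */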
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/* should not be called from interrupt context */
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications or updates. It is used when something
 * has gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
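/*
 * Minimal registration sketch ("my_ops" and "my_callback" are
 * hypothetical names). The callback and everything it calls must be
 * notrace, as the comment above warns:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */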
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}