/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

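/*
 * Only entry types that the selftests below are expected to generate are
 * treated as valid; trace_test_buffer_cpu() reports anything else as a
 * corrupted ring buffer.
 */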
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we loop
		 * more times than that, something is wrong with the
		 * ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * in it are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING
	       "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func		= trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func		= trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func		= trace_selftest_test_probe3_func,
};

static struct ftrace_ops test_global = {
	.func		= trace_selftest_test_global_func,
	.flags		= FTRACE_OPS_FL_GLOBAL,
};

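/*
 * test_global is registered with FTRACE_OPS_FL_GLOBAL, so it is hooked to
 * the global ops list (the one ftrace_set_global_filter() manages) rather
 * than carrying its own filter.  Since what it traces depends on the current
 * global filter, the checks below only require its counter to be non-zero.
 */
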
static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

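/*
 * Register the three filtered ops plus the global one, call the two test
 * functions, and verify that each callback fired exactly as often as its
 * filter allows; then repeat with a dynamically allocated ftrace_ops, and
 * finally unregister everything and confirm that no callback fires again.
 */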
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * include them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

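/*
 * For reference, a tracer opts into one of these selftests through the
 * .selftest callback of its struct tracer.  A minimal sketch, modeled on
 * how the function tracer wires it up in trace_functions.c (init/reset
 * callback names assumed from that file, not defined here):
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_SELFTEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 *
 * register_tracer() then runs the selftest at boot when
 * CONFIG_FTRACE_STARTUP_TEST is enabled.
 */
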
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which the selftest
 * has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

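/*
 * The irqsoff, preemptoff and preemptirqsoff selftests below all follow the
 * same pattern: init the tracer, zero tracing_max_latency, open a short
 * critical section to generate a latency, stop tracing, verify that both the
 * live buffer and the max-latency snapshot (max_tr) contain sane entries,
 * then restore the saved maximum.
 */
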
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	local_irq_disable();
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif /* CONFIG_NOP_TRACER */

#ifdef CONFIG_SCHED_TRACER

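/*
 * The wakeup selftest: spawn a SCHED_FIFO kthread that signals a completion
 * and then sleeps, wait for that completion so we know the thread really is
 * running at RT priority, zero tracing_max_latency, wake the thread, and
 * check that the wakeup latency was recorded in the max buffer.
 */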
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */