/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>
static inline int trace_valid_entry(struct trace_entry *entry)
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);
		/*
		 * The ring buffer has a size of trace_buf_size; if we loop
		 * more times than that, there's something wrong with the
		 * ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
	printk(KERN_CONT ".. corrupted trace buffer .. ");
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
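/*
 * As used by the callers below: when the count argument is non-NULL it is
 * expected to receive the number of entries seen (the cnt read from
 * ring_buffer_entries()), and a nonzero return value means the per-cpu
 * buffer check failed.
 */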
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);
	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken and is constantly filling
	 * the buffer, this will run forever and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);

	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define STR(x) __STR(x)
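/*
 * The two-level STR()/__STR() pattern stringifies a macro argument only
 * after it has been expanded. A minimal illustration, assuming the elided
 * line above defines __STR(x) as #x (the names FOO/bar below are purely
 * hypothetical, not from this file):
 *
 *	#define FOO bar
 *	__STR(FOO)	->  "FOO"
 *	STR(FOO)	->  "bar"
 */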
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	/* passed in by parameter to keep gcc from optimizing it away */
	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);
	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);
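	/*
	 * A note on the call above: the leading '*' in func_name acts as a
	 * glob, so the filter still matches when an architecture prefixes
	 * the symbol name (see the comment above), and the trailing 1 is
	 * understood here to be ftrace_set_filter()'s reset flag, replacing
	 * any previously installed filter rather than extending it.
	 */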
	ret = trace->init(tr);
		warn_failed_init_tracer(trace, ret);
	/* Sleep for 1/10 of a second */

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);

		printk(KERN_CONT ".. filter did not filter .. ");
	/* call our function again */

	/* stop the tracing. */

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
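	/*
	 * Taken together, the two buffer checks above encode the pass
	 * condition for this test: with the filter set to a function that
	 * has not run yet the buffer must stay empty, and after the
	 * filtered function is called exactly once the buffer must hold
	 * exactly one entry.
	 */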
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);
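	/*
	 * Passing a NULL/zero-length pattern with the reset flag set drops
	 * the test filter again, so later selftests start with every
	 * function traceable and with ftrace_enabled/tracer_enabled back
	 * at the values saved on entry.
	 */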
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	/* start the tracing */
	ret = trace->init(tr);
		warn_failed_init_tracer(trace, ret);
	/* Sleep for 1/10 of a second */

	/* stop the tracing. */

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	/* start the tracing */
	ret = trace->init(tr);
		warn_failed_init_tracer(trace, ret);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
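	/*
	 * The irqs-off window itself is elided here; a minimal sketch of
	 * the kind of section the tracer is expected to latch as the new
	 * max latency (the exact calls and delay are an assumption, not
	 * taken from this file):
	 *
	 *	local_irq_disable();
	 *	udelay(100);
	 *	local_irq_enable();
	 */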
	/* stop the tracing. */

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);

	ret = trace_test_buffer(&max_tr, &count);
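	/*
	 * Note that two buffers are checked: the live trace (tr), whose
	 * count is not of interest here (NULL), and max_tr, the snapshot
	 * taken at the maximum recorded latency, which must contain at
	 * least one entry for the test to pass (see the check below).
	 */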
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	tracing_max_latency = save_max;
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
	/* start the tracing */
	ret = trace->init(tr);
		warn_failed_init_tracer(trace, ret);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	/* stop the tracing. */

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);

	ret = trace_test_buffer(&max_tr, &count);
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	tracing_max_latency = save_max;
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
	/* start the tracing */
	ret = trace->init(tr);
		warn_failed_init_tracer(trace, ret);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */

	/* reverse the order of preempt vs irqs */
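	/*
	 * "Reverse the order" is read here as: the elided critical section
	 * is not released in strictly nested order, and the second run
	 * below swaps which of preemption and interrupts is disabled
	 * first, so both halves of the combined tracer (irqsoff and
	 * preemptoff) get a latency to record. This is an interpretation
	 * of the surrounding comments, not a comment carried over from
	 * the file.
	 */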
	/* stop the tracing. */

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);

	ret = trace_test_buffer(&max_tr, &count);
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	/* reverse the order of preempt vs irqs */

	/* stop the tracing. */

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
	tracing_max_latency = save_max;

#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);
	/* Make it known that we have a new prio */

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);
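	/*
	 * The completion is the handshake with trace_wakeup_test_thread():
	 * waiting here ensures the child has already switched itself to an
	 * RT policy before the max-latency measurement below starts; the
	 * sleep that follows then gives it time to actually go to sleep.
	 */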
	/* start the tracing */
	ret = trace->init(tr);
		warn_failed_init_tracer(trace, ret);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	/*
	 * Yes, this is slightly racy. It is possible that for some strange
	 * reason the RT thread we created did not call schedule for 100ms
	 * after doing the completion, and we do a wakeup on a task that is
	 * already awake. But that is extremely unlikely, and the worst
	 * thing that happens in such a case is that we disable tracing.
	 * Honestly, if this race does happen, something is horribly wrong
	 * with the system.
	 */
	/* give a little time to let the thread wake up */

	/* stop the tracing. */

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);

	ret = trace_test_buffer(&max_tr, &count);
	tracing_max_latency = save_max;

	/* kill the thread */

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	/* start the tracing */
	ret = trace->init(tr);
		warn_failed_init_tracer(trace, ret);
	/* Sleep for 1/10 of a second */
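	/*
	 * The sleep is also what produces the events this test looks for:
	 * putting the current task to sleep and waking it again forces at
	 * least a couple of context switches for the sched_switch tracer
	 * to record, so the "no entries" check below has something to find.
	 */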
	/* stop the tracing. */

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_SYSPROF_TRACER

trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	/* start the tracing */
	ret = trace->init(tr);
		warn_failed_init_tracer(trace, ret);
	/* Sleep for 1/10 of a second */

	/* stop the tracing. */

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

#endif /* CONFIG_SYSPROF_TRACER */
#ifdef CONFIG_BRANCH_TRACER

trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	/* start the tracing */
	ret = trace->init(tr);
		warn_failed_init_tracer(trace, ret);
	/* Sleep for 1/10 of a second */

	/* stop the tracing. */

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

#endif /* CONFIG_BRANCH_TRACER */