kernel/trace/trace_selftest.c
/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if
		 * we loop more than that, there's something wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
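
/*
 * The two helpers above are used by every selftest below: start a
 * tracer, generate some activity, stop tracing, and then call
 * trace_test_buffer() to verify that the recorded entries are sane and
 * (optionally) to find out how many there were.
 */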
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
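
/*
 * Two-level stringification: STR(x) first expands its macro argument and
 * __STR() then turns the expansion into a string literal.  So, assuming
 * DYN_FTRACE_TEST_NAME is #defined elsewhere (trace.h) to the name of the
 * dynamic test function, STR(DYN_FTRACE_TEST_NAME) yields that function's
 * name as a string, not the literal "DYN_FTRACE_TEST_NAME".
 */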

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
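
/*
 * For reference: the function pointer passed in as @func above is
 * DYN_FTRACE_TEST_NAME, handed over by parameter so gcc cannot optimize
 * the call away.  A minimal sketch of such a test function (the real one
 * is assumed to live in a separate file, e.g. trace_selftest_dynamic.c)
 * is simply:
 *
 *	int DYN_FTRACE_TEST_NAME(void)
 *	{
 *		return 0;
 *	}
 */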
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
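
/*
 * How these selftests get run (illustrative sketch, not part of this
 * file): each tracer points at its test through the ->selftest member of
 * struct tracer, and register_tracer() invokes it at boot when
 * CONFIG_FTRACE_STARTUP_TEST is enabled, e.g.:
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		...
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */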

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Pretty much the same as the function tracer test, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
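
/*
 * Note on the latency selftests: the irqsoff, preemptoff and wakeup
 * tracers snapshot the trace of the worst-case (maximum latency) window
 * into the separate max_tr array.  That is why these tests reset
 * tracing_max_latency to 0 first (so the short udelay() or wakeup
 * registers as a new maximum) and then check both the live buffer and
 * max_tr afterwards.
 */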

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it known we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
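
/*
 * The wakeup selftest below pairs with the thread above: the thread
 * raises itself to an RT priority, signals the completion and goes to
 * sleep; the test waits for that completion, sleeps long enough for the
 * thread to be blocked, wakes it, and then checks (via max_tr) that the
 * wakeup was traced.
 */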

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */