/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
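
/*
 * trace_valid_entry() deliberately whitelists only the entry types the
 * selftests themselves can generate; anything else found in the ring
 * buffer is treated as corruption by trace_test_buffer_cpu() below.
 */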

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds trace_buf_size entries; if we
		 * loop more times than that, something is wrong with
		 * the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
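
/*
 * Every startup selftest below follows the same basic pattern: init the
 * tracer, generate some traceable activity, stop tracing, then call
 * trace_test_buffer() to validate (and optionally count) what was
 * recorded.
 */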

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
	.func = trace_selftest_test_global_func,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
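
/*
 * Each ftrace_ops pairs a callback with its own filter state.
 * FTRACE_OPS_FL_RECURSION_SAFE tells the ftrace core that the callback
 * handles its own recursion protection, and FTRACE_OPS_FL_GLOBAL puts
 * test_global on the global filter set shared with
 * ftrace_set_global_filter().
 */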

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
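
	/*
	 * With these filters in place, a call to DYN_FTRACE_TEST_NAME()
	 * should fire probe1 and probe3 (plus the unfiltered global probe),
	 * while DYN_FTRACE_TEST_NAME2() should fire probe2 and probe3.
	 * The count checks below verify exactly that.
	 */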

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection. By
	 * calling this function again, we should recurse back into this
	 * function and count again. But this only happens if the arch
	 * supports all ftrace features and nothing else is using the
	 * function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
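
/*
 * The first probe relies on the ftrace core to stop the recursive call,
 * so its counter must end at exactly 1.  The second probe is flagged
 * recursion-safe; its counter is pre-set to 1 in the test below, so a
 * final value of 2 simply confirms the callback ran once, independent
 * of whether the arch would actually have let it recurse.
 */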

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;
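
/*
 * Simple three-state tracker: the probe below moves it from START to
 * either FOUND or NOT_FOUND, depending on whether a pt_regs pointer was
 * handed to the callback, and the test decodes the result afterwards.
 */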

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}

#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000
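
/*
 * The threshold is deliberately huge: the graph selftest only runs for
 * about 100ms (see the msleep(100) below), so crossing this count is a
 * sign the tracer is looping on itself rather than tracing real work.
 */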

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but we attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer before tracing_stop() to avoid a spurious
	 * warning: tracing_stop() disables the tr and max buffers,
	 * which makes the buffer flip fail if a max irqs-off latency
	 * is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer before tracing_stop() to avoid a spurious
	 * warning: tracing_stop() disables the tr and max buffers,
	 * which makes the buffer flip fail if a max preempt-off
	 * latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
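
	/*
	 * Note the enable order above is deliberately not the mirror of
	 * the disable order: preemption is re-enabled while interrupts
	 * are still off, so the preempt-off and irqs-off windows end at
	 * different points and both latency paths get exercised.
	 */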

	/*
	 * Stop the tracer before tracing_stop() to avoid a spurious
	 * warning: tracing_stop() disables the tr and max buffers,
	 * which makes the buffer flip fail if a max irqs/preempt-off
	 * latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are running at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
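
/*
 * The handshake above: the thread completes once when its RT priority
 * is set, then sleeps, then completes again when the test wakes it.
 * The wakeup latency tracer should capture that second wakeup of a
 * higher-priority task.
 */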

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the RT thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&isrt);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&isrt);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */