/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>
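
/*
 * Startup selftests for the tracers. Each test enables one tracer,
 * generates activity the tracer should have recorded, then checks the
 * trace buffers for sanity. A non-zero return marks the selftest (and
 * with it the tracer) as failed.
 */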

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}
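
/*
 * Walk one CPU's list of trace pages and verify that every recorded
 * entry has a valid type and that the page list is exactly as long as
 * tr->entries implies.
 */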
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
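
/*
 * Callers that only care whether the buffer is sane pass count == NULL;
 * the latency tests below pass &count to verify that something was
 * actually traced.
 */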

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
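
/*
 * Two-level stringification is the usual cpp idiom: STR() expands its
 * macro argument first, and __STR() then turns the result into a string
 * literal. E.g. if DYN_FTRACE_TEST_NAME were defined as my_test_func
 * (name purely illustrative), STR(DYN_FTRACE_TEST_NAME) yields
 * "my_test_func", whereas __STR(DYN_FTRACE_TEST_NAME) would yield
 * "DYN_FTRACE_TEST_NAME".
 */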

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);
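
	/*
	 * The resulting string is a glob, e.g. "*my_test_func" (name
	 * illustrative), so the match still works when the arch prepends
	 * a prefix such as the '.' used for 64-bit PowerPC dot-symbols.
	 */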

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
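
/*
 * DYN_FTRACE_TEST_NAME must name a real function that the filter test
 * can call and trace. A minimal sketch of such a helper, kept in a
 * separate compilation unit so the compiler cannot inline it (body
 * illustrative):
 *
 *	int DYN_FTRACE_TEST_NAME(void)
 *	{
 *		return 0;
 *	}
 *
 * The body is irrelevant; the function exists only so that its entry
 * can be recorded and filtered on.
 */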

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */
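
/*
 * How these tests get run: a tracer points .selftest at the matching
 * routine in its struct tracer, and register_tracer() invokes it when
 * the tracer registers. A sketch, with field values modeled on the
 * function tracer of this era (names illustrative):
 *
 *	static struct tracer function_trace __read_mostly =
 *	{
 *		.name		= "ftrace",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *		.ctrl_update	= function_trace_ctrl_update,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */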

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
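
	/*
	 * tr holds the live trace; max_tr holds the snapshot saved when
	 * the maximum irqs-off latency was recorded. Both must be sane,
	 * and the snapshot must actually contain entries.
	 */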
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
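
/*
 * The combined test runs the critical section twice, re-enabling
 * preemption and interrupts in both orders, since the tracer must
 * close the latency window on whichever of the two is re-enabled
 * last.
 */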
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
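
/*
 * The completion handshake (complete() in the thread above,
 * wait_for_completion() in the test below) guarantees the thread is
 * running at RT priority before the test starts timing. The latency
 * being measured is from wake_up_process() until the RT thread
 * actually gets the CPU, which is what the wakeup tracer records in
 * max_tr.
 */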
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */