1 /* Include in trace.c */
3 #include <linux/kthread.h>
/*
 * NOTE(review): fragmented extraction -- only the signature of this
 * predicate survives here; its body (original lines after 5) is elided.
 * Presumably it returns nonzero when *entry holds a recognized entry
 * type -- confirm against the complete trace selftest source.
 */
5 static inline int trace_valid_entry(struct trace_entry
*entry
)
/*
 * trace_test_buffer_cpu -- sanity-check one CPU's trace buffer.
 *
 * Walks the pages linked on data->trace_pages, validating each recorded
 * entry with trace_valid_entry() and checking that the page list length
 * matches the entry count claimed by tr->entries.
 *
 * NOTE(review): fragmented extraction.  Declarations of 'page', 'i',
 * 'idx' and 'ret', most braces, and the return statements are elided
 * between the surviving lines; annotations cover only what is visible.
 */
16 trace_test_buffer_cpu(struct trace_array
*tr
, struct trace_array_cpu
*data
)
19 struct trace_entry
*entries
;
/* head of the per-cpu page list gives the first page of entries */
23 page
= list_entry(data
->trace_pages
.next
, struct page
, lru
);
24 entries
= page_address(page
);
/* data->trace is expected to alias the first page's entry array */
26 if (data
->trace
!= entries
)
30 * The starting trace buffer always has valid elements,
31 * if any element exits.
33 entries
= data
->trace
;
/* scan every slot the trace array claims to contain */
35 for (i
= 0; i
< tr
->entries
; i
++) {
/*
 * Only slots below data->trace_idx have been written; those must
 * pass trace_valid_entry().  NOTE(review): the computation of 'idx'
 * (presumably i modulo ENTRIES_PER_PAGE) is elided in this fragment.
 */
37 if (i
< data
->trace_idx
&&
38 !trace_valid_entry(&entries
[idx
])) {
39 printk(KERN_CONT
".. invalid entry %d ", entries
[idx
].type
);
/* crossed a page boundary: move to the next page in the list */
44 if (idx
>= ENTRIES_PER_PAGE
) {
45 page
= virt_to_page(entries
);
/* wrapped back to the list head: no more pages available */
46 if (page
->lru
.next
== &data
->trace_pages
) {
/* running out of pages before the last entry is a size mismatch */
47 if (i
!= tr
->entries
- 1) {
48 printk(KERN_CONT
".. entries buffer mismatch");
52 page
= list_entry(page
->lru
.next
, struct page
, lru
);
53 entries
= page_address(page
);
/* after the walk, the final page's ->next must close the list */
59 page
= virt_to_page(entries
);
60 if (page
->lru
.next
!= &data
->trace_pages
) {
61 printk(KERN_CONT
".. too many entries");
/* common failure exit -- any check above lands here */
68 printk(KERN_CONT
".. corrupted trace buffer .. ");
73 * Test the trace buffer to see if all the elements
/*
 * trace_test_buffer -- validate the buffers of every possible CPU.
 *
 * Sums each online buffer's trace_idx into *count (when the caller
 * passes a non-NULL count -- elided here) and runs
 * trace_test_buffer_cpu() on each populated per-cpu buffer.
 *
 * NOTE(review): fragmented extraction.  Declarations of 'cpu' and
 * 'ret', the locking around the walk, the 'continue' for empty
 * buffers, and the return path are elided.
 */
76 static int trace_test_buffer(struct trace_array
*tr
, unsigned long *count
)
78 unsigned long cnt
= 0;
82 for_each_possible_cpu(cpu
) {
/* skip CPUs whose buffer was never allocated/used */
83 if (!tr
->data
[cpu
]->trace
)
86 cnt
+= tr
->data
[cpu
]->trace_idx
;
/* debug output: per-cpu running total of recorded entries */
87 printk("%d: count = %ld\n", cpu
, cnt
);
89 ret
= trace_test_buffer_cpu(tr
, tr
->data
[cpu
]);
102 * Simple verification test of ftrace function tracer.
103 * Enable ftrace, sleep 1/10 second, and then read the trace
104 * buffer to see if all is in order.
/*
 * NOTE(review): fragmented extraction -- declarations of 'ret' and
 * 'count', the tracer init/start calls, the msleep, and return
 * statements are elided between the surviving lines.
 */
107 trace_selftest_startup_function(struct tracer
*trace
, struct trace_array
*tr
)
112 /* make sure functions have been recorded */
113 ret
= ftrace_force_update();
115 printk(KERN_CONT
".. ftraced failed .. ");
119 /* start the tracing */
122 /* Sleep for a 1/10 of a second */
124 /* stop the tracing. */
126 trace
->ctrl_update(tr
);
127 /* check the trace buffer */
128 ret
= trace_test_buffer(tr
, &count
);
/* success requires both a clean buffer check and at least one entry */
131 if (!ret
&& !count
) {
132 printk(KERN_CONT
".. no entries found ..");
138 #endif /* CONFIG_FTRACE */
140 #ifdef CONFIG_IRQSOFF_TRACER
/*
 * Self-test for the irqsoff tracer: trace a short irqs-disabled
 * window, then verify both the live buffer and the max-latency
 * snapshot (max_tr) contain sane entries.
 *
 * NOTE(review): fragmented extraction -- declarations of 'ret' and
 * 'count', the local_irq_disable/enable pair, and the return path are
 * elided between the surviving lines.
 */
142 trace_selftest_startup_irqsoff(struct tracer
*trace
, struct trace_array
*tr
)
/* preserve the global max latency so the test doesn't clobber it */
144 unsigned long save_max
= tracing_max_latency
;
148 /* start the tracing */
151 /* reset the max latency */
152 tracing_max_latency
= 0;
153 /* disable interrupts for a bit */
157 /* stop the tracing. */
159 trace
->ctrl_update(tr
);
160 /* check both trace buffers */
161 ret
= trace_test_buffer(tr
, NULL
);
163 ret
= trace_test_buffer(&max_tr
, &count
);
/* the max-latency buffer must have captured at least one entry */
166 if (!ret
&& !count
) {
167 printk(KERN_CONT
".. no entries found ..");
/* restore the saved global regardless of test outcome */
171 tracing_max_latency
= save_max
;
175 #endif /* CONFIG_IRQSOFF_TRACER */
177 #ifdef CONFIG_PREEMPT_TRACER
/*
 * Self-test for the preemptoff tracer: trace a short
 * preemption-disabled window, then verify the live buffer and the
 * max-latency snapshot (max_tr).
 *
 * NOTE(review): fragmented extraction -- declarations of 'ret' and
 * 'count', the preempt_disable/enable pair, and the return path are
 * elided between the surviving lines.
 */
179 trace_selftest_startup_preemptoff(struct tracer
*trace
, struct trace_array
*tr
)
/* preserve the global max latency across the test */
181 unsigned long save_max
= tracing_max_latency
;
185 /* start the tracing */
188 /* reset the max latency */
189 tracing_max_latency
= 0;
190 /* disable preemption for a bit */
194 /* stop the tracing. */
196 trace
->ctrl_update(tr
);
197 /* check both trace buffers */
198 ret
= trace_test_buffer(tr
, NULL
);
200 ret
= trace_test_buffer(&max_tr
, &count
);
/* the max-latency buffer must have captured at least one entry */
203 if (!ret
&& !count
) {
204 printk(KERN_CONT
".. no entries found ..");
/* restore the saved global regardless of test outcome */
208 tracing_max_latency
= save_max
;
212 #endif /* CONFIG_PREEMPT_TRACER */
214 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Self-test for the combined preempt+irqs-off tracer.  Runs the check
 * twice: once disabling preemption before interrupts, then once in the
 * reverse order, verifying live and max-latency buffers each time.
 *
 * NOTE(review): fragmented extraction -- declarations of 'ret' and
 * 'count', the preempt/irq disable-enable sequences, error-path
 * cleanup, and return statements are elided between the surviving
 * lines.
 */
216 trace_selftest_startup_preemptirqsoff(struct tracer
*trace
, struct trace_array
*tr
)
/* preserve the global max latency across both passes */
218 unsigned long save_max
= tracing_max_latency
;
222 /* start the tracing */
226 /* reset the max latency */
227 tracing_max_latency
= 0;
229 /* disable preemption and interrupts for a bit */
234 /* reverse the order of preempt vs irqs */
237 /* stop the tracing. */
239 trace
->ctrl_update(tr
);
240 /* check both trace buffers */
241 ret
= trace_test_buffer(tr
, NULL
);
245 ret
= trace_test_buffer(&max_tr
, &count
);
/* first pass: max-latency buffer must contain entries */
249 if (!ret
&& !count
) {
250 printk(KERN_CONT
".. no entries found ..");
255 /* do the test by disabling interrupts first this time */
256 tracing_max_latency
= 0;
/* restart tracing for the second (irqs-first) pass */
258 trace
->ctrl_update(tr
);
263 /* reverse the order of preempt vs irqs */
266 /* stop the tracing. */
268 trace
->ctrl_update(tr
);
269 /* check both trace buffers */
270 ret
= trace_test_buffer(tr
, NULL
);
274 ret
= trace_test_buffer(&max_tr
, &count
);
/* second pass: max-latency buffer must contain entries */
276 if (!ret
&& !count
) {
277 printk(KERN_CONT
".. no entries found ..");
/* restore the saved global regardless of test outcome */
284 tracing_max_latency
= save_max
;
288 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
290 #ifdef CONFIG_SCHED_TRACER
/*
 * Kernel thread used by the wakeup-latency self-test.  Promotes itself
 * to a real-time priority, signals readiness via the completion passed
 * in 'data', then sleeps until the test wakes it, finally idling until
 * kthread_stop() is called.
 *
 * NOTE(review): fragmented extraction -- the sched_yield/complete
 * calls, the schedule() that performs the sleep, the body of the
 * should-stop loop, and the return statement are elided.
 */
291 static int trace_wakeup_test_thread(void *data
)
/* 'data' carries the completion the test waits on */
293 struct completion
*x
= data
;
295 /* Make this a RT thread, doesn't need to be too high */
297 rt_mutex_setprio(current
, MAX_RT_PRIO
- 5);
299 /* Make it know we have a new prio */
302 /* now go to sleep and let the test wake us up */
303 set_current_state(TASK_INTERRUPTIBLE
);
306 /* we are awake, now wait to disappear */
307 while (!kthread_should_stop()) {
309 * This is an RT task, do short sleeps to let
/*
 * Self-test for the wakeup-latency tracer: spawn an RT kthread, let it
 * sleep, wake it while tracing, then verify both the live buffer and
 * the max-latency snapshot recorded the wakeup.
 *
 * NOTE(review): fragmented extraction -- declarations of 'ret' and
 * 'count', the wake_up_process() call, the msleep, the kthread_stop(),
 * and return statements are elided between the surviving lines.
 */
319 trace_selftest_startup_wakeup(struct tracer
*trace
, struct trace_array
*tr
)
/* preserve the global max latency across the test */
321 unsigned long save_max
= tracing_max_latency
;
322 struct task_struct
*p
;
/* completion the helper thread fires once it reaches RT priority */
323 struct completion isrt
;
327 init_completion(&isrt
);
329 /* create a high prio thread */
330 p
= kthread_run(trace_wakeup_test_thread
, &isrt
, "ftrace-test");
332 printk(KERN_CONT
"Failed to create ftrace wakeup test thread ");
336 /* make sure the thread is running at an RT prio */
337 wait_for_completion(&isrt
);
339 /* start the tracing */
342 /* reset the max latency */
343 tracing_max_latency
= 0;
345 /* sleep to let the RT thread sleep too */
349 * Yes this is slightly racy. It is possible that for some
350 * strange reason that the RT thread we created, did not
351 * call schedule for 100ms after doing the completion,
352 * and we do a wakeup on a task that already is awake.
353 * But that is extremely unlikely, and the worst thing that
354 * happens in such a case, is that we disable tracing.
355 * Honestly, if this race does happen something is horrible
356 * wrong with the system.
361 /* stop the tracing. */
363 trace
->ctrl_update(tr
);
364 /* check both trace buffers */
365 ret
= trace_test_buffer(tr
, NULL
);
367 ret
= trace_test_buffer(&max_tr
, &count
);
/* restore the saved global before reporting the result */
372 tracing_max_latency
= save_max
;
374 /* kill the thread */
/* the max-latency buffer must have captured the wakeup */
377 if (!ret
&& !count
) {
378 printk(KERN_CONT
".. no entries found ..");
384 #endif /* CONFIG_SCHED_TRACER */
386 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Self-test for the context-switch tracer: trace for 1/10 second and
 * verify the buffer is intact and non-empty.
 *
 * NOTE(review): fragmented extraction -- declarations of 'ret' and
 * 'count', the tracer start call, the msleep, and return statements
 * are elided between the surviving lines.
 */
388 trace_selftest_startup_sched_switch(struct tracer
*trace
, struct trace_array
*tr
)
393 /* start the tracing */
396 /* Sleep for a 1/10 of a second */
398 /* stop the tracing. */
400 trace
->ctrl_update(tr
);
401 /* check the trace buffer */
402 ret
= trace_test_buffer(tr
, &count
);
/* success requires a clean buffer check and at least one entry */
405 if (!ret
&& !count
) {
406 printk(KERN_CONT
".. no entries found ..");
412 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
414 #ifdef CONFIG_DYNAMIC_FTRACE
415 #endif /* CONFIG_DYNAMIC_FTRACE */