/*
 * kernel/trace/trace_selftest.c — ftrace tracer self-tests
 * (from commit "ftrace: add self-tests"; included by trace.c)
 */
1 /* Include in trace.c */
3 #include <linux/kthread.h>
5 static inline int trace_valid_entry(struct trace_entry *entry)
7 switch (entry->type) {
8 case TRACE_FN:
9 case TRACE_CTX:
10 return 1;
12 return 0;
15 static int
16 trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
18 struct page *page;
19 struct trace_entry *entries;
20 int idx = 0;
21 int i;
23 page = list_entry(data->trace_pages.next, struct page, lru);
24 entries = page_address(page);
26 if (data->trace != entries)
27 goto failed;
30 * The starting trace buffer always has valid elements,
31 * if any element exits.
33 entries = data->trace;
35 for (i = 0; i < tr->entries; i++) {
37 if (i < data->trace_idx &&
38 !trace_valid_entry(&entries[idx])) {
39 printk(KERN_CONT ".. invalid entry %d ", entries[idx].type);
40 goto failed;
43 idx++;
44 if (idx >= ENTRIES_PER_PAGE) {
45 page = virt_to_page(entries);
46 if (page->lru.next == &data->trace_pages) {
47 if (i != tr->entries - 1) {
48 printk(KERN_CONT ".. entries buffer mismatch");
49 goto failed;
51 } else {
52 page = list_entry(page->lru.next, struct page, lru);
53 entries = page_address(page);
55 idx = 0;
59 page = virt_to_page(entries);
60 if (page->lru.next != &data->trace_pages) {
61 printk(KERN_CONT ".. too many entries");
62 goto failed;
65 return 0;
67 failed:
68 printk(KERN_CONT ".. corrupted trace buffer .. ");
69 return -1;
73 * Test the trace buffer to see if all the elements
74 * are still sane.
76 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
78 unsigned long cnt = 0;
79 int cpu;
80 int ret = 0;
82 for_each_possible_cpu(cpu) {
83 if (!tr->data[cpu]->trace)
84 continue;
86 cnt += tr->data[cpu]->trace_idx;
87 printk("%d: count = %ld\n", cpu, cnt);
89 ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
90 if (ret)
91 break;
94 if (count)
95 *count = cnt;
97 return ret;
#ifdef CONFIG_FTRACE
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* make sure functions have been recorded */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* a clean but empty buffer is still a failure */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_FTRACE */
#ifdef CONFIG_IRQSOFF_TRACER
/*
 * Verify the irqsoff tracer: run a short irqs-disabled window and
 * check that both the live buffer and the max-latency snapshot
 * captured sane, non-empty traces.
 */
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
/*
 * Verify the preemptoff tracer: run a short preemption-disabled
 * window and check that both the live buffer and the max-latency
 * snapshot captured sane, non-empty traces.
 */
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Verify the combined preempt/irqsoff tracer. The critical section
 * is exercised twice — once releasing preemption before irqs, once
 * the reverse — and both the live and max-latency buffers are
 * checked after each pass.
 */
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
290 #ifdef CONFIG_SCHED_TRACER
291 static int trace_wakeup_test_thread(void *data)
293 struct completion *x = data;
295 /* Make this a RT thread, doesn't need to be too high */
297 rt_mutex_setprio(current, MAX_RT_PRIO - 5);
299 /* Make it know we have a new prio */
300 complete(x);
302 /* now go to sleep and let the test wake us up */
303 set_current_state(TASK_INTERRUPTIBLE);
304 schedule();
306 /* we are awake, now wait to disappear */
307 while (!kthread_should_stop()) {
309 * This is an RT task, do short sleeps to let
310 * others run.
312 msleep(100);
315 return 0;
319 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
321 unsigned long save_max = tracing_max_latency;
322 struct task_struct *p;
323 struct completion isrt;
324 unsigned long count;
325 int ret;
327 init_completion(&isrt);
329 /* create a high prio thread */
330 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
331 if (!IS_ERR(p)) {
332 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
333 return -1;
336 /* make sure the thread is running at an RT prio */
337 wait_for_completion(&isrt);
339 /* start the tracing */
340 tr->ctrl = 1;
341 trace->init(tr);
342 /* reset the max latency */
343 tracing_max_latency = 0;
345 /* sleep to let the RT thread sleep too */
346 msleep(100);
349 * Yes this is slightly racy. It is possible that for some
350 * strange reason that the RT thread we created, did not
351 * call schedule for 100ms after doing the completion,
352 * and we do a wakeup on a task that already is awake.
353 * But that is extremely unlikely, and the worst thing that
354 * happens in such a case, is that we disable tracing.
355 * Honestly, if this race does happen something is horrible
356 * wrong with the system.
359 wake_up_process(p);
361 /* stop the tracing. */
362 tr->ctrl = 0;
363 trace->ctrl_update(tr);
364 /* check both trace buffers */
365 ret = trace_test_buffer(tr, NULL);
366 if (!ret)
367 ret = trace_test_buffer(&max_tr, &count);
370 trace->reset(tr);
372 tracing_max_latency = save_max;
374 /* kill the thread */
375 kthread_stop(p);
377 if (!ret && !count) {
378 printk(KERN_CONT ".. no entries found ..");
379 ret = -1;
382 return ret;
384 #endif /* CONFIG_SCHED_TRACER */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Verify the context-switch tracer: trace for 1/10 second of normal
 * activity and confirm the buffer holds sane, non-empty entries.
 */
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
414 #ifdef CONFIG_DYNAMIC_FTRACE
415 #endif /* CONFIG_DYNAMIC_FTRACE */