/*
 * kernel/trace/trace_selftest.c
 * (extracted from a gitweb blob view; blob 0911b7e073bf197b021ba77c75f6777a03910aa7)
 */
1 /* Include in trace.c */
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
6 static inline int trace_valid_entry(struct trace_entry *entry)
8 switch (entry->type) {
9 case TRACE_FN:
10 case TRACE_CTX:
11 case TRACE_WAKE:
12 case TRACE_STACK:
13 case TRACE_SPECIAL:
14 return 1;
16 return 0;
19 static int
20 trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
22 struct trace_entry *entries;
23 struct page *page;
24 int idx = 0;
25 int i;
27 BUG_ON(list_empty(&data->trace_pages));
28 page = list_entry(data->trace_pages.next, struct page, lru);
29 entries = page_address(page);
31 check_pages(data);
32 if (head_page(data) != entries)
33 goto failed;
36 * The starting trace buffer always has valid elements,
37 * if any element exists.
39 entries = head_page(data);
41 for (i = 0; i < tr->entries; i++) {
43 if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
44 printk(KERN_CONT ".. invalid entry %d ",
45 entries[idx].type);
46 goto failed;
49 idx++;
50 if (idx >= ENTRIES_PER_PAGE) {
51 page = virt_to_page(entries);
52 if (page->lru.next == &data->trace_pages) {
53 if (i != tr->entries - 1) {
54 printk(KERN_CONT ".. entries buffer mismatch");
55 goto failed;
57 } else {
58 page = list_entry(page->lru.next, struct page, lru);
59 entries = page_address(page);
61 idx = 0;
65 page = virt_to_page(entries);
66 if (page->lru.next != &data->trace_pages) {
67 printk(KERN_CONT ".. too many entries");
68 goto failed;
71 return 0;
73 failed:
74 /* disable tracing */
75 tracing_disabled = 1;
76 printk(KERN_CONT ".. corrupted trace buffer .. ");
77 return -1;
81 * Test the trace buffer to see if all the elements
82 * are still sane.
84 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
86 unsigned long flags, cnt = 0;
87 int cpu, ret = 0;
89 /* Don't allow flipping of max traces now */
90 raw_local_irq_save(flags);
91 __raw_spin_lock(&ftrace_max_lock);
92 for_each_possible_cpu(cpu) {
93 if (!head_page(tr->data[cpu]))
94 continue;
96 cnt += tr->data[cpu]->trace_idx;
98 ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
99 if (ret)
100 break;
102 __raw_spin_unlock(&ftrace_max_lock);
103 raw_local_irq_restore(flags);
105 if (count)
106 *count = cnt;
108 return ret;
111 #ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE

/* stringify helpers for DYN_FTRACE_TEST_NAME */
#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer: func() was not traced yet */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
208 * Simple verification test of ftrace function tracer.
209 * Enable ftrace, sleep 1/10 second, and then read the trace
210 * buffer to see if all is in order.
213 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
215 unsigned long count;
216 int ret;
217 int save_ftrace_enabled = ftrace_enabled;
218 int save_tracer_enabled = tracer_enabled;
220 /* make sure msleep has been recorded */
221 msleep(1);
223 /* force the recorded functions to be traced */
224 ret = ftrace_force_update();
225 if (ret) {
226 printk(KERN_CONT ".. ftraced failed .. ");
227 return ret;
230 /* start the tracing */
231 ftrace_enabled = 1;
232 tracer_enabled = 1;
234 tr->ctrl = 1;
235 trace->init(tr);
236 /* Sleep for a 1/10 of a second */
237 msleep(100);
238 /* stop the tracing. */
239 tr->ctrl = 0;
240 trace->ctrl_update(tr);
241 ftrace_enabled = 0;
243 /* check the trace buffer */
244 ret = trace_test_buffer(tr, &count);
245 trace->reset(tr);
247 if (!ret && !count) {
248 printk(KERN_CONT ".. no entries found ..");
249 ret = -1;
250 goto out;
253 ret = trace_selftest_startup_dynamic_tracing(trace, tr,
254 DYN_FTRACE_TEST_NAME);
256 out:
257 ftrace_enabled = save_ftrace_enabled;
258 tracer_enabled = save_tracer_enabled;
260 /* kill ftrace totally if we failed */
261 if (ret)
262 ftrace_kill();
264 return ret;
266 #endif /* CONFIG_FTRACE */
#ifdef CONFIG_IRQSOFF_TRACER
/*
 * Verify the irqs-off latency tracer: disable interrupts briefly,
 * then check both the live buffer and the max-latency snapshot
 * recorded at least one sane entry.
 */
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
/*
 * Verify the preempt-off latency tracer: disable preemption briefly,
 * then check both the live buffer and the max-latency snapshot
 * recorded at least one sane entry.
 */
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Verify the combined preempt/irqs-off tracer. Runs the critical
 * section twice: once re-enabling preemption before interrupts,
 * once in the reverse order, checking the buffers after each pass.
 */
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
418 #ifdef CONFIG_SCHED_TRACER
419 static int trace_wakeup_test_thread(void *data)
421 /* Make this a RT thread, doesn't need to be too high */
422 struct sched_param param = { .sched_priority = 5 };
423 struct completion *x = data;
425 sched_setscheduler(current, SCHED_FIFO, &param);
427 /* Make it know we have a new prio */
428 complete(x);
430 /* now go to sleep and let the test wake us up */
431 set_current_state(TASK_INTERRUPTIBLE);
432 schedule();
434 /* we are awake, now wait to disappear */
435 while (!kthread_should_stop()) {
437 * This is an RT task, do short sleeps to let
438 * others run.
440 msleep(100);
443 return 0;
447 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
449 unsigned long save_max = tracing_max_latency;
450 struct task_struct *p;
451 struct completion isrt;
452 unsigned long count;
453 int ret;
455 init_completion(&isrt);
457 /* create a high prio thread */
458 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
459 if (IS_ERR(p)) {
460 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
461 return -1;
464 /* make sure the thread is running at an RT prio */
465 wait_for_completion(&isrt);
467 /* start the tracing */
468 tr->ctrl = 1;
469 trace->init(tr);
470 /* reset the max latency */
471 tracing_max_latency = 0;
473 /* sleep to let the RT thread sleep too */
474 msleep(100);
477 * Yes this is slightly racy. It is possible that for some
478 * strange reason that the RT thread we created, did not
479 * call schedule for 100ms after doing the completion,
480 * and we do a wakeup on a task that already is awake.
481 * But that is extremely unlikely, and the worst thing that
482 * happens in such a case, is that we disable tracing.
483 * Honestly, if this race does happen something is horrible
484 * wrong with the system.
487 wake_up_process(p);
489 /* stop the tracing. */
490 tr->ctrl = 0;
491 trace->ctrl_update(tr);
492 /* check both trace buffers */
493 ret = trace_test_buffer(tr, NULL);
494 if (!ret)
495 ret = trace_test_buffer(&max_tr, &count);
498 trace->reset(tr);
500 tracing_max_latency = save_max;
502 /* kill the thread */
503 kthread_stop(p);
505 if (!ret && !count) {
506 printk(KERN_CONT ".. no entries found ..");
507 ret = -1;
510 return ret;
512 #endif /* CONFIG_SCHED_TRACER */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Verify the context-switch tracer: trace for 1/10 second and check
 * that the buffer is sane and non-empty (sleeping guarantees at
 * least one context switch was recorded).
 */
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_SYSPROF_TRACER
/*
 * Verify the sysprof tracer: trace for 1/10 second and check the
 * buffer is sane. Unlike the other selftests, an empty buffer is
 * not treated as a failure here.
 */
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */