linux-2.6.git: tools/perf/builtin-test.c (blob 484f26cc0c00c6ee80d74e93bddc3fe24eae4c1f)
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "util/pmu.h"
#include "event-parse.h"
#include "../../include/linux/hw_breakpoint.h"

#include <sys/mman.h>
static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
					   struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}
static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	long page_size = sysconf(_SC_PAGE_SIZE);
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold kernel, modules obtained from
	 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}
	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;
	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that look if we find the ref reloc symbol, if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all
	 * of them in the kallsyms dso. For the ones that are in both, check
	 * their names and end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;
		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms don't have the symbol end, so we
				 * set that by using the next symbol start - 1,
				 * in some cases we get this up to a page
				 * wrong, trace_kmalloc when I was developing
				 * this code was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}
	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
		 * while the kernel will have the path for the vmlinux file
		 * being used, so use the short name, less descriptive but the
		 * same ("[kernel]" in both cases).
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}
210 pr_info("Maps in vmlinux with a different name in kallsyms:\n");
212 for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
213 struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
215 pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
216 if (pair == NULL || pair->priv)
217 continue;
219 if (pair->start == pos->start) {
220 pair->priv = 1;
221 pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
222 pos->start, pos->end, pos->pgoff, pos->dso->name);
223 if (pos->pgoff != pair->pgoff || pos->end != pair->end)
224 pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
225 pair->start, pair->end, pair->pgoff);
226 pr_info(" %s\n", pair->dso->name);
227 pair->priv = 1;
231 pr_info("Maps only in kallsyms:\n");
233 for (nd = rb_first(&kallsyms.kmaps.maps[type]);
234 nd; nd = rb_next(nd)) {
235 struct map *pos = rb_entry(nd, struct map, rb_node);
237 if (!pos->priv)
238 map__fprintf(pos, stderr);
240 out:
241 return err;
#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>
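
/*
 * Read a tracepoint's numeric id from debugfs, i.e. from
 * <tracing_events_path>/syscalls/<evname>/id, typically under
 * /sys/kernel/debug/tracing/events/. Returns the id, or -1 on error.
 */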
static int trace_event__id(const char *evname)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename,
		     "%s/syscalls/%s/id",
		     tracing_events_path, evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
#include <sched.h>

static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;

		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror(errno));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * the syscalls.
 *
 * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated
 * sample.id field to map back to its respective perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_TRACEPOINT,
		.read_format	= PERF_FORMAT_ID,
		.sample_type	= PERF_SAMPLE_ID,
		.watermark	= 0,
	};
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	int ids[nsyscalls];
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
		if (ids[i] < 0) {
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
			return -1;
		}
		nr_events[i] = 0;
		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}
	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;

	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_close_fd;
		}
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_fd;
	}
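
	/*
	 * Generate the expected number of calls to each syscall. The return
	 * value is stored and bumped only so that the call's result is
	 * consumed; presumably this is just to quiet unused-result warnings.
	 */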
	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}
	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_munmap;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_munmap;
		}

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_munmap;
		}
		nr_events[evsel->idx]++;
	}

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			goto out_munmap;
		}
	}

	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_fd:
	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
	perf_evlist__delete(evlist);
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
#undef nsyscalls
}
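
/*
 * Return the first cpu in @pid's affinity mask, growing the dynamically
 * sized cpu_set_t until sched_getaffinity() stops failing with EINVAL.
 * On success the mask, narrowed down to that single cpu, is handed back
 * through @maskp/@sizep and must be released by the caller with CPU_FREE().
 */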
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
					 size_t *sizep)
{
	cpu_set_t *mask;
	size_t size;
	int i, cpu = -1, nrcpus = 1024;
realloc:
	mask = CPU_ALLOC(nrcpus);
	size = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(size, mask);

	if (sched_getaffinity(pid, size, mask) == -1) {
		CPU_FREE(mask);
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET_S(i, size, mask)) {
			if (cpu == -1) {
				cpu = i;
				*maskp = mask;
				*sizep = size;
			} else
				CPU_CLR_S(i, size, mask);
		}
	}

	if (cpu == -1)
		CPU_FREE(mask);

	return cpu;
}
static int test__PERF_RECORD(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay   = true,
		.freq	    = 10,
		.mmap_pages = 256,
	};
	cpu_set_t *cpu_mask = NULL;
	size_t cpu_mask_size = 0;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}
	/*
	 * We need at least one evsel in the evlist, use the default
	 * one: "cycles".
	 */
	err = perf_evlist__add_default(evlist);
	if (err < 0) {
		pr_debug("Not enough memory to create evsel\n");
		goto out_delete_evlist;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts, argv);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
	evsel->attr.sample_type |= PERF_SAMPLE_TID;
	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
	perf_evlist__config_attrs(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
					    &cpu_mask_size);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	cpu = err;
	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n", strerror(errno));
		goto out_free_cpu_mask;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);
	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_err;
				}

				if (verbose) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				    (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}
				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					bname = strrchr(event->mmap.filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}
			}
		}

		/*
		 * We don't use poll() here because, at least as of 3.1, the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
		 */
		if (total_events == before && false)
			poll(evlist->pollfd, evlist->nr_fds, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}
found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_err:
	perf_evlist__munmap(evlist);
out_free_cpu_mask:
	CPU_FREE(cpu_mask);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}
#if defined(__x86_64__) || defined(__i386__)

#define barrier() asm volatile("" ::: "memory")
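
/*
 * Compiler-only barrier for the seqlock-style reads in mmap_read_self();
 * presumably no CPU fence is needed because the page is updated by the
 * kernel for this same task, on the same CPU where the counter is read.
 */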
static u64 rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((u64)high) << 32;
}

static u64 rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((u64)high) << 32;
}
static u64 mmap_read_self(void *addr)
{
	struct perf_event_mmap_page *pc = addr;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 count, cyc = 0, time_offset = 0, enabled, running, delta;
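
	/*
	 * Lockless read of the counter state the kernel publishes in the
	 * mmap'ed page: pc->lock is a sequence count bumped around updates,
	 * so if it changed while we were reading, retry the whole snapshot.
	 */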
	do {
		seq = pc->lock;
		barrier();

		enabled = pc->time_enabled;
		running = pc->time_running;

		if (enabled != running) {
			cyc = rdtsc();
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
			time_offset = pc->time_offset;
		}

		idx = pc->index;
		count = pc->offset;
		if (idx)
			count += rdpmc(idx - 1);

		barrier();
	} while (pc->lock != seq);
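
	/*
	 * If the event wasn't on the PMU the whole time it was enabled
	 * (enabled != running, e.g. when events are multiplexed), fold the
	 * time since the kernel's last update into both times using the
	 * published mult/shift/offset, then scale the raw count by
	 * enabled/running; the quotient/remainder split reduces the chance
	 * of 64-bit overflow in count * enabled.
	 */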
	if (enabled != running) {
		u64 quot, rem;

		quot = (cyc >> time_shift);
		rem = cyc & ((1 << time_shift) - 1);
		delta = time_offset + quot * time_mult +
			((rem * time_mult) >> time_shift);

		enabled += delta;
		if (idx)
			running += delta;

		quot = count / running;
		rem = count % running;
		count = quot * enabled + (rem * enabled) / running;
	}

	return count;
}
/*
 * If the RDPMC instruction faults then signal this back to the test parent task:
 */
static void segfault_handler(int sig __maybe_unused,
			     siginfo_t *info __maybe_unused,
			     void *uc __maybe_unused)
{
	exit(-1);
}
static int __test__rdpmc(void)
{
	long page_size = sysconf(_SC_PAGE_SIZE);
	volatile int tmp = 0;
	u64 i, loops = 1000;
	int n;
	int fd;
	void *addr;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_INSTRUCTIONS,
		.exclude_kernel = 1,
	};
	u64 delta_sum = 0;
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sigfillset(&sa.sa_mask);
	sa.sa_sigaction = segfault_handler;
	sa.sa_flags = SA_SIGINFO;	/* the handler uses the three-argument form */
	sigaction(SIGSEGV, &sa, NULL);

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n", fd, strerror(errno));
		return -1;
	}

	addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == (void *)(-1)) {
		pr_err("Error: mmap() syscall returned with (%s)\n",
		       strerror(errno));
		goto out_close;
	}

	for (n = 0; n < 6; n++) {
		u64 stamp, now, delta;

		stamp = mmap_read_self(addr);

		for (i = 0; i < loops; i++)
			tmp++;

		now = mmap_read_self(addr);
		loops *= 10;

		delta = now - stamp;
		pr_debug("%14d: %14Lu\n", n, (long long)delta);

		delta_sum += delta;
	}

	munmap(addr, page_size);
	pr_debug(" ");
out_close:
	close(fd);

	if (!delta_sum)
		return -1;

	return 0;
}
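
/*
 * Run the check in a forked child: if rdpmc faults there (e.g. userspace
 * counter reads are not permitted), the SIGSEGV handler exits just the
 * child and the parent reports the failure from waitpid()'s status.
 */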
static int test__rdpmc(void)
{
	int status = 0;
	int wret = 0;
	int ret;
	int pid;

	pid = fork();
	if (pid < 0)
		return -1;

	if (!pid) {
		ret = __test__rdpmc();

		exit(ret);
	}

	wret = waitpid(pid, &status, 0);
	if (wret < 0 || status)
		return -1;

	return 0;
}

#endif
static int test__perf_pmu(void)
{
	return perf_pmu__test();
}
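
/*
 * Round-trip check for hw cache event names: synthesize every valid
 * <type>-<op>-<result> name, run each through parse_events(), then verify
 * the evsels that came out carry exactly the names that went in.
 */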
static int perf_evsel__roundtrip_cache_name_test(void)
{
	char name[128];
	int type, op, err = 0, ret = 0, i, idx;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				err = parse_events(evlist, name, 0);
				if (err)
					ret = err;
			}
		}
	}

	idx = 0;
	evsel = perf_evlist__first(evlist);

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				if (evsel->idx != idx)
					continue;

				++idx;

				if (strcmp(perf_evsel__name(evsel), name)) {
					pr_debug("%s != %s\n", perf_evsel__name(evsel), name);
					ret = -1;
				}

				evsel = perf_evsel__next(evsel);
			}
		}
	}

	perf_evlist__delete(evlist);
	return ret;
}
static int __perf_evsel__name_array_test(const char *names[], int nr_names)
{
	int i, err;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_names; ++i) {
		err = parse_events(evlist, names[i], 0);
		if (err) {
			pr_debug("failed to parse event '%s', err %d\n",
				 names[i], err);
			goto out_delete_evlist;
		}
	}

	err = 0;
	list_for_each_entry(evsel, &evlist->entries, node) {
		if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
			--err;
			pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}
#define perf_evsel__name_array_test(names) \
	__perf_evsel__name_array_test(names, ARRAY_SIZE(names))

static int perf_evsel__roundtrip_name_test(void)
{
	int err = 0, ret = 0;

	err = perf_evsel__name_array_test(perf_evsel__hw_names);
	if (err)
		ret = err;

	err = perf_evsel__name_array_test(perf_evsel__sw_names);
	if (err)
		ret = err;

	err = perf_evsel__roundtrip_cache_name_test();
	if (err)
		ret = err;

	return ret;
}
static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
				  int size, bool should_be_signed)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int is_signed;
	int ret = 0;

	if (field == NULL) {
		pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
		return -1;
	}

	is_signed = !!(field->flags & FIELD_IS_SIGNED);
	if (should_be_signed && !is_signed) {
		pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
			 evsel->name, name, is_signed, should_be_signed);
		ret = -1;
	}

	if (field->size != size) {
		pr_debug("%s: \"%s\" size (%d) should be %d!\n",
			 evsel->name, name, field->size, size);
		ret = -1;
	}

	return ret;
}
static int perf_evsel__tp_sched_test(void)
{
	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
	int ret = 0;

	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		return -1;
	}

	if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_state", 8, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_prio", 4, true))
		ret = -1;

	perf_evsel__delete(evsel);

	evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		return -1;
	}

	if (perf_evsel__test_field(evsel, "comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prio", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "success", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
		ret = -1;

	perf_evsel__delete(evsel);
	return ret;
}
static int test__syscall_open_tp_fields(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay    = true,
		.freq	     = 1,
		.mmap_pages  = 256,
		.raw_samples = true,
	};
	const char *filename = "/etc/passwd";
	int flags = O_RDONLY | O_DIRECTORY;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	int err = -1, i, nr_events = 0, nr_polls = 0;

	if (evlist == NULL) {
		pr_debug("%s: perf_evlist__new\n", __func__);
		goto out;
	}

	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
	if (evsel == NULL) {
		pr_debug("%s: perf_evsel__newtp\n", __func__);
		goto out_delete_evlist;
	}
	perf_evlist__add(evlist, evsel);

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("%s: perf_evlist__create_maps\n", __func__);
		goto out_delete_evlist;
	}

	perf_evsel__config(evsel, &opts, evsel);

	evlist->threads->map[0] = getpid();

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	/*
	 * Generate the event:
	 */
	open(filename, flags);
	while (1) {
		int before = nr_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				int tp_flags;
				struct perf_sample sample;

				++nr_events;

				if (type != PERF_RECORD_SAMPLE)
					continue;

				err = perf_evsel__parse_sample(evsel, event, &sample);
				if (err) {
					pr_err("Can't parse sample, err = %d\n", err);
					goto out_munmap;
				}

				tp_flags = perf_evsel__intval(evsel, &sample, "flags");

				if (flags != tp_flags) {
					pr_debug("%s: Expected flags=%#x, got %#x\n",
						 __func__, flags, tp_flags);
					goto out_munmap;
				}

				goto out_ok;
			}
		}

		if (nr_events == before)
			poll(evlist->pollfd, evlist->nr_fds, 10);

		if (++nr_polls > 5) {
			pr_debug("%s: no events!\n", __func__);
			goto out_munmap;
		}
	}
out_ok:
	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}
static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = parse_events__test,
	},
#if defined(__x86_64__) || defined(__i386__)
	{
		.desc = "x86 rdpmc test",
		.func = test__rdpmc,
	},
#endif
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		.desc = "Test perf pmu format parsing",
		.func = test__perf_pmu,
	},
	{
		.desc = "Test dso data interface",
		.func = dso__test_data,
	},
	{
		.desc = "roundtrip evsel->name check",
		.func = perf_evsel__roundtrip_name_test,
	},
	{
		.desc = "Check parsing of sched tracepoints fields",
		.func = perf_evsel__tp_sched_test,
	},
	{
		.desc = "Generate and check syscalls:sys_enter_open event fields",
		.func = test__syscall_open_tp_fields,
	},
	{
		.func = NULL,
	},
};
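
/*
 * A test can be selected on the command line either by its 1-based number
 * or by a substring of its description; with no arguments every test runs.
 */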
static bool perf_test__matches(int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strstr(tests[curr].desc, argv[i]))
			return true;
	}

	return false;
}
static int __cmd_test(int argc, const char *argv[])
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++, err;

		if (!perf_test__matches(curr, argc, argv))
			continue;

		pr_info("%2d: %s:", i, tests[curr].desc);
		pr_debug("\n--- start ---\n");
		err = tests[curr].func();
		pr_debug("---- end ----\n%s:", tests[curr].desc);
		pr_info(" %s\n", err ? "FAILED!" : "Ok");
	}

	return 0;
}
static int perf_test__list(int argc, const char **argv)
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++;

		if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
			continue;

		pr_info("%2d: %s\n", i, tests[curr].desc);
	}

	return 0;
}
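
/*
 * Usage sketch (the forms below follow the grammar in test_usage; they are
 * illustrative, not exhaustive):
 *
 *	perf test		# run all tests
 *	perf test list		# list the available tests
 *	perf test list mmap	# list tests whose description mentions "mmap"
 *	perf test 1 3		# run tests 1 and 3
 *	perf test -v rdpmc	# run tests matching "rdpmc", verbosely
 */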
int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const test_usage[] = {
		"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
		NULL,
	};
	const struct option test_options[] = {
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show symbol address, etc)"),
		OPT_END()
	};

	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list"))
		return perf_test__list(argc, argv);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	return __cmd_test(argc, argv);
}