/*
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "util/cache.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"

static long page_size;
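/*
 * Filter passed to machine__load_vmlinux_path() further below: symbol__priv()
 * returns the per-symbol private area whose size is set via
 * symbol_conf.priv_size = sizeof(int) in cmd_test(), and the filter uses it
 * as a "visited" flag for each vmlinux symbol.
 */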
static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}
static int test__vmlinux_matches_kallsyms(void)
{
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
	/*
	 * Init the machines that will hold kernel, modules obtained from
	 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");

	/*
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
	/*
	 * kallsyms will be internally sorted by name, on demand, so that we
	 * can find the reference relocation symbol, i.e. the symbol we will
	 * use to see if the running kernel was relocated, by checking whether
	 * it has the same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
		pr_debug("dso__find_symbol_by_name ");

	ref_reloc_sym.addr = sym->start;
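	/*
	 * At this point ref_reloc_sym.addr holds where _stext ended up in the
	 * running kernel according to kallsyms; when the vmlinux file is
	 * loaded below, comparing this against the unrelocated _stext found
	 * there is what tells us whether the running kernel was relocated.
	 */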
	/*
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
	/*
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that, look for the ref reloc symbol; if we find it we'll
	 * have its ref_reloc_symbol.unrelocated_addr, and then
	 * maps__reloc_vmlinux will notice and set the proper ->[un]map_ip
	 * routines to fix up the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");

	/*
	 * Now look at the symbols in the vmlinux DSO and check if we find all
	 * of them in the kallsyms dso. For the ones that are in both, check
	 * their names and end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);

		if (pair && pair->start == sym->start) {
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't have the symbol end, so we
				 * set that by using the next symbol's start - 1.
				 * In some cases we get this up to a page wrong;
				 * trace_kmalloc, when I was developing this
				 * code, was one such example, 2106 bytes off
				 * the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;

				if (llabs(skew) < page_size)
					continue;
150 pr_debug("%#" PRIx64
": diff end addr for %s v: %#" PRIx64
" k: %#" PRIx64
"\n",
151 sym
->start
, sym
->name
, sym
->end
, pair
->end
);
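				/*
				 * Illustrative numbers only: if kallsyms puts
				 * the next symbol at 0xffffffff81100840, the
				 * synthesized end is 0xffffffff8110083f; if
				 * vmlinux says the symbol really ends at
				 * 0xffffffff81100810, the skew is 0x2f bytes,
				 * well under page_size and tolerated by the
				 * check above.
				 */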
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);

				struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

				if (next->start == sym->start) {

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);

			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
		 * while here the kernel map will have the path of the vmlinux
		 * file being used, so use the short name, less descriptive but
		 * the same ("[kernel]") in both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
			map__fprintf(pos, stderr);
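	/*
	 * Note: a kallsyms map's ->priv field is (presumably) set once it has
	 * been matched from the vmlinux side, which is why the lookup below
	 * skips pairs whose ->priv is already set and the final "Maps only in
	 * kallsyms" pass reports only maps that never matched.
	 */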
204 pr_info("Maps in vmlinux with a different name in kallsyms:\n");
206 for (nd
= rb_first(&vmlinux
.kmaps
.maps
[type
]); nd
; nd
= rb_next(nd
)) {
207 struct map
*pos
= rb_entry(nd
, struct map
, rb_node
), *pair
;
		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
225 pr_info("Maps only in kallsyms:\n");
227 for (nd
= rb_first(&kallsyms
.kmaps
.maps
[type
]);
228 nd
; nd
= rb_next(nd
)) {
229 struct map
*pos
= rb_entry(nd
, struct map
, rb_node
);
232 map__fprintf(pos
, stderr
);
238 #include "util/cpumap.h"
239 #include "util/evsel.h"
240 #include <sys/types.h>
static int trace_event__id(const char *evname)
{
	if (asprintf(&filename,
		     "/sys/kernel/debug/tracing/events/syscalls/%s/id",
		     evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (read(fd, id, sizeof(id)) > 0)
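/*
 * The tracepoint's "id" file holds its numeric id as ASCII decimal, e.g.
 * (the value varies between kernel builds):
 *
 *   $ cat /sys/kernel/debug/tracing/events/syscalls/sys_enter_open/id
 *   457
 *
 * so the helper above returns that number, which the tests below plug into
 * perf_event_attr.config for their PERF_TYPE_TRACEPOINT events.
 */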
static int test__open_syscall_event(void)
{
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;

	evsel = perf_evsel__new(&attr, 0);
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}
	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
	}
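	/*
	 * The counter was opened per thread for this process only, so after
	 * the open() loop above the sys_enter_open tracepoint should have
	 * fired exactly nr_open_calls times for this thread; that is what the
	 * read below checks.
	 */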
	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__open_read_on_cpu\n");

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);

	perf_evsel__close_fd(evsel, 1, threads->nr);

	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);

static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");

	cpus = cpu_map__new(NULL);
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;

	evsel = perf_evsel__new(&attr, 0);
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;

		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu], strerror(errno));
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
		}

		CPU_CLR(cpus->map[cpu], &cpu_set);
	}
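	/*
	 * Each CPU was pinned in turn and issued nr_open_calls + cpu open()
	 * calls, so every CPU should end up with a distinct expected count;
	 * the per-cpu reads further below verify exactly that.
	 */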
	/*
	 * Here we need to explicitly preallocate the counts, as if we use the
	 * auto allocation it will allocate just for 1 cpu, as we start by
	 * cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;
		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__open_read_on_cpu\n");

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);

	perf_evsel__close_fd(evsel, 1, threads->nr);

	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * these syscalls.
 *
 * It will receive the events, using mmap, and use the PERF_SAMPLE_ID
 * generated sample.id field to map each one back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_TRACEPOINT,
		.read_format	= PERF_FORMAT_ID,
		.sample_type	= PERF_SAMPLE_ID,
	};
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, };
#define nsyscalls ARRAY_SIZE(syscall_names)
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;
	for (i = 0; i < nsyscalls; ++i) {
		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");

		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");

	cpus = cpu_map__new(NULL);
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;
	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);
		if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
		}

	while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));

		perf_event__parse_sample(event, attr.sample_type, false, &sample);
		evsel = perf_evlist__id2evsel(evlist, sample.id);
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);

		nr_events[evsel->idx]++;
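		/*
		 * Mapping back works because each evsel was created with its
		 * index i and the attr requests PERF_SAMPLE_ID / PERF_FORMAT_ID,
		 * so the kernel-assigned sample id can be resolved by
		 * perf_evlist__id2evsel() to the evsel whose nr_events slot was
		 * just bumped.
		 */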
	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 event_name(evsel), nr_events[evsel->idx]);

	perf_evlist__munmap(evlist);

	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);

	perf_evlist__delete(evlist);

	cpu_map__delete(cpus);

	thread_map__delete(threads);
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
static int __cmd_test(void)
{
	page_size = sysconf(_SC_PAGE_SIZE);

	while (tests[i].func) {
		pr_info("%2d: %s:", i + 1, tests[i].desc);
		pr_debug("\n--- start ---\n");
		err = tests[i].func();
		pr_debug("---- end ----\n%s:", tests[i].desc);
		pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
static const char * const test_usage[] = {
	"perf test [<options>]",
	NULL,
};

static const struct option test_options[] = {
	OPT_INTEGER('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_END()
};
int cmd_test(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc)
		usage_with_options(test_usage, test_options);
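	/*
	 * symbol_conf knobs the tests rely on: priv_size reserves the
	 * per-symbol int used as the "visited" flag by
	 * vmlinux_matches_kallsyms_filter(), sort_by_name is needed for
	 * map__find_symbol_by_name(), and try_vmlinux_path lets
	 * machine__load_vmlinux_path() search the usual vmlinux locations.
	 */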
	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)