/*
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "util/cache.h"
#include "util/color.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"

#include "event-parse.h"
#include "../../include/linux/hw_breakpoint.h"

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>
static int trace_event__id(const char *evname)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename, "%s/syscalls/%s/id",
		     tracing_events_path, evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];

		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}
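/*
 * A note on the lookup above: each tracepoint is exported under debugfs as
 * a directory whose "id" file holds the numeric id that goes into
 * perf_event_attr.config for PERF_TYPE_TRACEPOINT events, e.g. (the id
 * value is illustrative only, it varies per kernel build):
 *
 *   $ cat /sys/kernel/debug/tracing/events/syscalls/sys_enter_open/id
 *   457
 */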
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
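/*
 * The flow above is the template the other counting tests follow: resolve
 * the tracepoint id, open one counter on the current thread, generate a
 * known number of syscalls, then read the counter back and compare. The
 * 111 is an arbitrary call count; any value would do as long as both
 * sides agree on it.
 */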
static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;

		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu], strerror(errno));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
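/*
 * Design note on the per-cpu loop above: pinning the thread to one CPU at
 * a time and issuing nr_open_calls + cpu opens gives every CPU a distinct
 * expected count, so a count aggregated on the wrong CPU shows up as a
 * mismatch instead of cancelling out.
 */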
/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * these syscalls.
 *
 * It will receive the events, using mmap, and use the PERF_SAMPLE_ID
 * generated sample.id field to map each one back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_TRACEPOINT,
		.read_format	= PERF_FORMAT_ID,
		.sample_type	= PERF_SAMPLE_ID,
		.watermark	= 0,
	};
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	int ids[nsyscalls];
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
		if (ids[i] < 0) {
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
			return -1;
		}
		nr_events[i] = 0;
		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;

	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_close_fd;
		}
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_fd;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_munmap;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_munmap;
		}

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_munmap;
		}
		nr_events[evsel->idx]++;
	}

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			goto out_munmap;
		}
	}

	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_fd:
	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
	perf_evlist__delete(evlist);
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
#undef nsyscalls
}
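/*
 * The demux step above depends on two attr bits set on every event:
 * PERF_FORMAT_ID makes the kernel assign each counter an id the tools can
 * query, and PERF_SAMPLE_ID stamps that id into each sample, which is what
 * lets perf_evlist__id2evsel() route records from a shared ring buffer
 * back to the evsel that produced them.
 */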
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1, nrcpus = 1024;
realloc:
	CPU_ZERO(maskp);

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;
			else
				CPU_CLR(i, maskp);
		}
	}

	return cpu;
}
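/*
 * Note on the EINVAL retry above: sched_getaffinity() fails with EINVAL
 * when the kernel's CPU mask doesn't fit into cpu_set_t; widening nrcpus
 * only raises the scan bound below, so boxes with more than CPU_SETSIZE
 * CPUs would really need the CPU_ALLOC() dynamic-mask API.
 */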
static int test__PERF_RECORD(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay   = true,
		.freq	    = 10,
		.mmap_pages = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}
	/*
	 * We need at least one evsel in the evlist, use the default
	 * one: "cycles".
	 */
	err = perf_evlist__add_default(evlist);
	if (err < 0) {
		pr_debug("Not enough memory to create evsel\n");
		goto out_delete_evlist;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts, argv);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
	evsel->attr.sample_type |= PERF_SAMPLE_TID;
	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
	perf_evlist__config_attrs(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);
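	/*
	 * A sketch of the handshake ending here: prepare_workload() forked
	 * the child and left it blocked before exec, the parent then opened
	 * and mmaped all the counters, and start_workload() merely releases
	 * the child, so no event from the workload itself can be lost.
	 */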
	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_err;
				}

				if (verbose) {
					pr_info("%" PRIu64 " %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				    (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP) &&
				    event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					bname = strrchr(event->mmap.filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
					break;
				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}
			}
		}

		/*
		 * We don't use poll here because, at least as of 3.1, the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
		 */
		if (total_events == before && false)
			poll(evlist->pollfd, evlist->nr_fds, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_err:
	perf_evlist__munmap(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}
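/*
 * Note the two failure channels above: err records hard API failures while
 * errs accumulates validation mismatches; either one makes the test fail.
 */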
#if defined(__x86_64__) || defined(__i386__)

#define barrier() asm volatile("" ::: "memory")

static u64 rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((u64)high) << 32;
}

static u64 rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((u64)high) << 32;
}
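/*
 * Both helpers assemble a 64-bit value from the EDX:EAX pair the
 * instructions return. rdpmc expects the counter index in ECX; the index
 * used below comes from the pc->index field the kernel publishes in the
 * mmapped perf_event_mmap_page.
 */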
static u64 mmap_read_self(void *addr)
{
	struct perf_event_mmap_page *pc = addr;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 count, cyc = 0, time_offset = 0, enabled, running, delta;

	do {
		seq = pc->lock;
		barrier();

		enabled = pc->time_enabled;
		running = pc->time_running;

		if (enabled != running) {
			cyc = rdtsc();
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
			time_offset = pc->time_offset;
		}

		idx = pc->index;
		count = pc->offset;
		if (idx)
			count += rdpmc(idx - 1);

		barrier();
	} while (pc->lock != seq);

	if (enabled != running) {
		u64 quot, rem;

		quot = (cyc >> time_shift);
		rem = cyc & ((1 << time_shift) - 1);
		delta = time_offset + quot * time_mult +
			((rem * time_mult) >> time_shift);

		enabled += delta;
		if (idx)
			running += delta;

		quot = count / running;
		rem = count % running;
		count = quot * enabled + (rem * enabled) / running;
	}

	return count;
}
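/*
 * Worked example of the time scaling above, with illustrative numbers
 * only: for time_shift = 10, time_mult = 1000 and a cycle delta
 * cyc = 3072, quot = 3 and rem = 0, so delta = time_offset + 3000. And if
 * the event was scheduled in for only half of the enabled time
 * (running == enabled / 2), count = quot * enabled +
 * (rem * enabled) / running doubles the raw count to compensate.
 */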
/*
 * If the RDPMC instruction faults then signal this back to the test parent
 * task:
 */
static void segfault_handler(int sig __maybe_unused,
			     siginfo_t *info __maybe_unused,
			     void *uc __maybe_unused)
{
	exit(-1);
}
static int __test__rdpmc(void)
{
	volatile int tmp = 0;
	u64 i, loops = 1000;
	int n;
	int fd;
	void *addr;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_INSTRUCTIONS,
		.exclude_kernel = 1,
	};
	u64 delta_sum = 0;
	struct sigaction sa;

	sigfillset(&sa.sa_mask);
	sa.sa_sigaction = segfault_handler;
	sigaction(SIGSEGV, &sa, NULL);

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n", fd, strerror(errno));
		return -1;
	}

	addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == (void *)(-1)) {
		pr_err("Error: mmap() syscall returned with (%s)\n",
		       strerror(errno));
		goto out_close;
	}

	for (n = 0; n < 6; n++) {
		u64 stamp, now, delta;

		stamp = mmap_read_self(addr);

		for (i = 0; i < loops; i++)
			tmp++;

		now = mmap_read_self(addr);
		loops *= 10;

		delta = now - stamp;
		pr_debug("%14d: %14Lu\n", n, (long long)delta);

		delta_sum += delta;
	}

	munmap(addr, page_size);
out_close:
	close(fd);

	if (!delta_sum)
		return -1;

	return 0;
}
static int test__rdpmc(void)
{
	int status = 0;
	int wret = 0;
	int ret;
	int pid;

	pid = fork();
	if (pid < 0)
		return -1;

	if (!pid) {
		ret = __test__rdpmc();

		exit(ret);
	}

	wret = waitpid(pid, &status, 0);
	if (wret < 0 || status)
		return -1;

	return 0;
}
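/*
 * Running __test__rdpmc() in a forked child isolates the parent from the
 * SIGSEGV: where userspace rdpmc is not available the instruction faults,
 * the child's handler exits non-zero, and waitpid() turns that into a
 * plain test failure here.
 */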
static int test__perf_pmu(void)
{
	return perf_pmu__test();
}
static int perf_evsel__roundtrip_cache_name_test(void)
{
	char name[128];
	int type, op, err = 0, ret = 0, i, idx;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type/op combos */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				err = parse_events(evlist, name, 0);
				if (err)
					ret = err;
			}
		}
	}

	idx = 0;
	evsel = perf_evlist__first(evlist);

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type/op combos */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				if (evsel->idx != idx)
					continue;

				++idx;

				if (strcmp(perf_evsel__name(evsel), name)) {
					pr_debug("%s != %s\n", perf_evsel__name(evsel), name);
					ret = -1;
				}

				evsel = perf_evsel__next(evsel);
			}
		}
	}

	perf_evlist__delete(evlist);
	return ret;
}
static int __perf_evsel__name_array_test(const char *names[], int nr_names)
{
	int i, err;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_names; ++i) {
		err = parse_events(evlist, names[i], 0);
		if (err) {
			pr_debug("failed to parse event '%s', err %d\n",
				 names[i], err);
			goto out_delete_evlist;
		}
	}

	err = 0;
	list_for_each_entry(evsel, &evlist->entries, node) {
		if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
			--err;
			pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}

#define perf_evsel__name_array_test(names) \
	__perf_evsel__name_array_test(names, ARRAY_SIZE(names))
static int perf_evsel__roundtrip_name_test(void)
{
	int err = 0, ret = 0;

	err = perf_evsel__name_array_test(perf_evsel__hw_names);
	if (err)
		ret = err;

	err = perf_evsel__name_array_test(perf_evsel__sw_names);
	if (err)
		ret = err;

	err = perf_evsel__roundtrip_cache_name_test();
	if (err)
		ret = err;

	return ret;
}
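/*
 * The "roundtrip" being checked: feed each canonical event name through
 * parse_events() and verify that the resulting evsel renders back to
 * exactly the same string via perf_evsel__name().
 */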
static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
				  int size, bool should_be_signed)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int is_signed;
	int ret = 0;

	if (field == NULL) {
		pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
		return -1;
	}

	is_signed = !!(field->flags & FIELD_IS_SIGNED);
	if (should_be_signed && !is_signed) {
		pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
			 evsel->name, name, is_signed, should_be_signed);
		ret = -1;
	}

	if (field->size != size) {
		pr_debug("%s: \"%s\" size (%d) should be %d!\n",
			 evsel->name, name, field->size, size);
		ret = -1;
	}

	return ret;
}
static int perf_evsel__tp_sched_test(void)
{
	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
	int ret = 0;

	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		return -1;
	}

	if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_state", 8, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_prio", 4, true))
		ret = -1;

	perf_evsel__delete(evsel);

	evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);

	if (perf_evsel__test_field(evsel, "comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prio", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "success", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
		ret = -1;

	return ret;
}
static int test__syscall_open_tp_fields(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay   = true,
		.freq	    = 1,
		.mmap_pages = 256,
		.raw_samples = true,
	};
	const char *filename = "/etc/passwd";
	int flags = O_RDONLY | O_DIRECTORY;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	int err = -1, i, nr_events = 0, nr_polls = 0;

	if (evlist == NULL) {
		pr_debug("%s: perf_evlist__new\n", __func__);
		goto out;
	}

	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
	if (evsel == NULL) {
		pr_debug("%s: perf_evsel__newtp\n", __func__);
		goto out_delete_evlist;
	}

	perf_evlist__add(evlist, evsel);

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("%s: perf_evlist__create_maps\n", __func__);
		goto out_delete_evlist;
	}

	perf_evsel__config(evsel, &opts, evsel);

	evlist->threads->map[0] = getpid();

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	/*
	 * Generate the event:
	 */
	open(filename, flags);

	while (1) {
		int before = nr_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				int tp_flags;
				struct perf_sample sample;

				++nr_events;

				if (type != PERF_RECORD_SAMPLE)
					continue;

				err = perf_evsel__parse_sample(evsel, event, &sample);
				if (err) {
					pr_err("Can't parse sample, err = %d\n", err);
					goto out_munmap;
				}

				tp_flags = perf_evsel__intval(evsel, &sample, "flags");

				if (flags != tp_flags) {
					pr_debug("%s: Expected flags=%#x, got %#x\n",
						 __func__, flags, tp_flags);
					goto out_munmap;
				}

				goto out_ok;
			}
		}

		if (nr_events == before)
			poll(evlist->pollfd, evlist->nr_fds, 10);

		if (++nr_polls > 5) {
			pr_debug("%s: no events!\n", __func__);
			goto out_munmap;
		}
	}
out_ok:
	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}
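/*
 * The flag choice above is deliberate: O_RDONLY | O_DIRECTORY on a regular
 * file makes the open() itself fail, but the sys_enter_open tracepoint
 * still fires, so the raw "flags" field can be compared against a value
 * unlikely to occur by accident.
 */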
static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = parse_events__test,
	},
#if defined(__x86_64__) || defined(__i386__)
	{
		.desc = "x86 rdpmc test",
		.func = test__rdpmc,
	},
#endif
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		.desc = "Test perf pmu format parsing",
		.func = test__perf_pmu,
	},
	{
		.desc = "Test dso data interface",
		.func = dso__test_data,
	},
	{
		.desc = "roundtrip evsel->name check",
		.func = perf_evsel__roundtrip_name_test,
	},
	{
		.desc = "Check parsing of sched tracepoints fields",
		.func = perf_evsel__tp_sched_test,
	},
	{
		.desc = "Generate and check syscalls:sys_enter_open event fields",
		.func = test__syscall_open_tp_fields,
	},
	{
		.desc = "struct perf_event_attr setup",
		.func = test_attr__run,
	},
	{
		.func = NULL,
	},
};
static bool perf_test__matches(int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strstr(tests[curr].desc, argv[i]))
			return true;
	}

	return false;
}
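/*
 * Selection is 1-based on the command line (nr == curr + 1), and any
 * argument that isn't a pure number falls through to substring matching
 * against the test descriptions.
 */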
static int __cmd_test(int argc, const char *argv[])
{
	int i = 0;
	int width = 0;

	while (tests[i].func) {
		int len = strlen(tests[i].desc);

		if (width < len)
			width = len;
		++i;
	}

	i = 0;
	while (tests[i].func) {
		int curr = i++, err;

		if (!perf_test__matches(curr, argc, argv))
			continue;

		pr_info("%2d: %-*s:", i, width, tests[curr].desc);
		pr_debug("\n--- start ---\n");
		err = tests[curr].func();
		pr_debug("---- end ----\n%s:", tests[curr].desc);
		if (err)
			color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
		else
			pr_info(" Ok\n");
	}

	return 0;
}
static int perf_test__list(int argc, const char **argv)
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++;

		if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
			continue;

		pr_info("%2d: %s\n", i, tests[curr].desc);
	}

	return 0;
}
int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const struct option test_options[] = {
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_END()
	};

	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list"))
		return perf_test__list(argc, argv);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	return __cmd_test(argc, argv);
}
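/*
 * Example invocations (output and numbering depend on the tests[] table
 * above):
 *
 *   $ perf test list mmap    # list tests whose description matches "mmap"
 *   $ perf test 1 2          # run tests by their 1-based number
 *   $ perf test rdpmc        # run tests matching a description substring
 */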