/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}
static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	fdarray__exit(&evlist->pollfd);
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
	}
}
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
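/*
 * Worked example (illustrative, not from the original file): for a group
 * of two evsels with idx 0 (cycles) and idx 1 (instructions), the list
 * head's next is the idx 0 entry and its prev the idx 1 entry, so
 * __perf_evlist__set_leader() computes nr_members = 1 - 0 + 1 = 2 and
 * points both evsels' ->leader at the idx 0 entry.
 */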
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (evsel == NULL)
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}
int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}
static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}
static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}
int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}
static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
{
	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].idx = idx;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, -1);
}
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
{
	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);

	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
}
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered);
}
int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}
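/*
 * Illustrative event-loop fragment (not from the original file), roughly
 * what 'perf record' does: poll for data, then prune fds that POLLHUP'ed
 * once their ring buffers have been drained:
 *
 *	while (!done) {
 *		...consume all mmaps...
 *		if (perf_evlist__poll(evlist, -1) < 0 && errno != EINTR)
 *			break;
 *		if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
 *			done = true;	// every workload fd has hung up
 *	}
 */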
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
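/*
 * For reference (not in the original file): with the legacy read() path
 * above, a read_format of PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID lays the buffer out as
 *
 *	read_data[0] = counter value
 *	read_data[1] = time_enabled
 *	read_data[2] = time_running
 *	read_data[3] = id
 *
 * which is why id_idx starts at 1 and is bumped once per time field.
 */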
static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = evlist->threads->map[thread];
	else
		sid->tid = -1;
}
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}
static bool perf_mmap__empty(struct perf_mmap *md)
{
	/* Empty when the consumer has caught up with the producer's head. */
	return perf_mmap__read_head(md) == md->prev;
}
static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
{
	++evlist->mmap[idx].refcnt;
}
static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
{
	BUG_ON(evlist->mmap[idx].refcnt == 0);

	if (--evlist->mmap[idx].refcnt == 0)
		__perf_evlist__munmap(evlist, idx);
}
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	if (!evlist->overwrite) {
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (md->refcnt == 1 && perf_mmap__empty(md))
		perf_evlist__mmap_put(evlist, idx);
}
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
		evlist->mmap[idx].refcnt = 0;
	}
}
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
struct mmap_params {
	int prot;
	int mask;
};

static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	evlist->mmap[idx].refcnt = 2;
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	return 0;
}
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd;

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_evlist__mmap_get(evlist, idx);
		}

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
			perf_evlist__mmap_put(evlist, idx);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
static size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX) {
		int max;

		if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
			/*
			 * Pick a once upon a time good value, i.e. things look
			 * strange since we can't read a sysctl value, but lets not
			 * die yet...
			 */
			max = 512;
		} else {
			max -= (page_size / 1024);
		}

		pages = (max * 1024) / page_size;
		if (!is_power_of_2(pages))
			pages = rounddown_pow_of_two(pages);
	} else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
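/*
 * Worked example (illustrative, not from the original file): with 4 KiB
 * pages and the default perf_event_mlock_kb of 516 kB, max becomes
 * 516 - 4 = 512 kB, i.e. pages = 128, already a power of two. The +1 in
 * the return value accounts for the control page that precedes the data
 * area, so the mapping is 129 pages long.
 */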
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
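/*
 * Examples (illustrative, not from the original file), assuming 4 KiB pages:
 *
 *	"16"	-> 16 pages (already a power of two)
 *	"100"	-> rounded up to 128 pages, with a pr_info() notice
 *	"512K"	-> 512 KiB, i.e. 128 pages
 *	"1G"	-> 262144 pages
 *
 * Values above @max are rejected with -EINVAL.
 */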
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int *mmap_pages = opt->value;
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}
/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}
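/*
 * Note on mp.mask (not in the original file): mmap_len is one control page
 * plus a power-of-two data area, so mmap_len - page_size - 1 is a mask of
 * the data area size. perf_evlist__mmap_read() uses it to wrap offsets,
 * e.g. with a 4 KiB page and 128 data pages the mask is 0x7ffff
 * (512 KiB - 1).
 */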
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (target__uses_dummy_map(target))
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	evlist->threads = NULL;
	return -1;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}
bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	int n;

	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}
static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	evlist->cpus = cpu_map__new(NULL);
	if (evlist->cpus == NULL)
		goto out;

	evlist->threads = thread_map__new_dummy();
	if (evlist->threads == NULL)
		goto out_free_cpus;

	err = 0;
out:
	return err;
out_free_cpus:
	cpu_map__delete(evlist->cpus);
	evlist->cpus = NULL;
	goto out;
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
*evlist
, struct target
*target
,
1317 const char *argv
[], bool pipe_output
,
1318 void (*exec_error
)(int signo
, siginfo_t
*info
, void *ucontext
))
1320 int child_ready_pipe
[2], go_pipe
[2];
1323 if (pipe(child_ready_pipe
) < 0) {
1324 perror("failed to create 'ready' pipe");
1328 if (pipe(go_pipe
) < 0) {
1329 perror("failed to create 'go' pipe");
1330 goto out_close_ready_pipe
;
1333 evlist
->workload
.pid
= fork();
1334 if (evlist
->workload
.pid
< 0) {
1335 perror("failed to fork");
1336 goto out_close_pipes
;
1339 if (!evlist
->workload
.pid
) {
1345 signal(SIGTERM
, SIG_DFL
);
1347 close(child_ready_pipe
[0]);
1349 fcntl(go_pipe
[0], F_SETFD
, FD_CLOEXEC
);
1352 * Tell the parent we're ready to go
1354 close(child_ready_pipe
[1]);
1357 * Wait until the parent tells us to go.
1359 ret
= read(go_pipe
[0], &bf
, 1);
1361 * The parent will ask for the execvp() to be performed by
1362 * writing exactly one byte, in workload.cork_fd, usually via
1363 * perf_evlist__start_workload().
1365 * For cancelling the workload without actually running it,
1366 * the parent will just close workload.cork_fd, without writing
1367 * anything, i.e. read will return zero and we just exit()
1372 perror("unable to read pipe");
1376 execvp(argv
[0], (char **)argv
);
1381 val
.sival_int
= errno
;
1382 if (sigqueue(getppid(), SIGUSR1
, val
))
1390 struct sigaction act
= {
1391 .sa_flags
= SA_SIGINFO
,
1392 .sa_sigaction
= exec_error
,
1394 sigaction(SIGUSR1
, &act
, NULL
);
1397 if (target__none(target
)) {
1398 if (evlist
->threads
== NULL
) {
1399 fprintf(stderr
, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
1400 __func__
, __LINE__
);
1401 goto out_close_pipes
;
1403 evlist
->threads
->map
[0] = evlist
->workload
.pid
;
1406 close(child_ready_pipe
[1]);
1409 * wait for child to settle
1411 if (read(child_ready_pipe
[0], &bf
, 1) == -1) {
1412 perror("unable to read pipe");
1413 goto out_close_pipes
;
1416 fcntl(go_pipe
[1], F_SETFD
, FD_CLOEXEC
);
1417 evlist
->workload
.cork_fd
= go_pipe
[1];
1418 close(child_ready_pipe
[0]);
1424 out_close_ready_pipe
:
1425 close(child_ready_pipe
[0]);
1426 close(child_ready_pipe
[1]);
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
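/*
 * Illustrative pairing (not part of the original file), as used when
 * profiling a new workload:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	...create counters/mmaps targeting the forked (but corked) child...
 *	perf_evlist__start_workload(evlist);	// write to cork_fd -> execvp()
 *
 * The child blocks in read(go_pipe[0], ...) until the single byte is
 * written, so the counters are in place before the workload starts.
 */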
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}
int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}