/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

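/*
 * Per-event file descriptors and sample ids are kept in two-dimensional
 * (cpu x thread) xyarrays; these accessors index into them.
 */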
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

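/*
 * Allocate a zeroed evlist and initialize it with the given cpu and
 * thread maps; returns NULL if the allocation fails.
 */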
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

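/*
 * Disable every counter in the list, across all monitored cpus and
 * threads, via PERF_EVENT_IOC_DISABLE on each per-(cpu, thread) fd.
 */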
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}

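/*
 * One pollable fd per (cpu, thread, event) triple, hence the sizing
 * below; the array is filled in by perf_evlist__add_pollfd() as the
 * ring buffers are set up.
 */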
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

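/*
 * Sample ids are hashed into evlist->heads so that the id field of a
 * sample can be mapped back to the evsel that generated it, see
 * perf_evlist__id2evsel().
 */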
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

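/*
 * Resolve a sample id to its evsel. With a single event in the list
 * there is no ambiguity, so the hash lookup is skipped entirely.
 */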
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	return NULL;
}

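/*
 * Read the next event out of the mmap ring buffer for map 'idx',
 * handling both overwrite (top-like) and non-overwrite (record-like)
 * modes, and stitching together events that wrap around the end of the
 * buffer.
 */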
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

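/*
 * One mmap per cpu, unless we are monitoring per thread (dummy cpu map,
 * map[0] == -1), in which case it is one mmap per thread.
 */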
int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

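/*
 * Mmap only the first fd on each cpu and redirect the remaining events
 * there with PERF_EVENT_IOC_SET_OUTPUT, so that all counters for a cpu
 * share a single ring buffer.
 */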
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

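/*
 * Same idea as perf_evlist__mmap_per_cpu(), but with one ring buffer
 * per thread, for the cpu-less (per task) monitoring case.
 */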
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1;
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

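/*
 * Typical call sequence, as a sketch (error handling elided; opening
 * the counters is done through the evsel API and is assumed to have
 * already happened before the mmap; target_pid stands in for whatever
 * task is being monitored):
 *
 *	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
 *	union perf_event *event;
 *
 *	perf_evlist__add_default(evlist);
 *	perf_evlist__create_maps(evlist, target_pid, -1, NULL);
 *	... open the counters on evlist->entries ...
 *	perf_evlist__mmap(evlist, 128, true);
 *	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL)
 *		... process event ...
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__delete(evlist);
 */
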
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (cpu_list == NULL && target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

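/*
 * The sanity checks below require every event in the list to agree on
 * sample_type/sample_id_all, since the tools decode samples assuming a
 * single layout.
 */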
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}