perf evlist: Fix grouping of multiple events
[linux-2.6/btrfs-unstable.git] / tools/perf/util/evlist.c
blob: fbb4b4ab9cc6925a57017abc3087eab9b3b12ff4
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
		}
	}
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
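
/*
 * Layout of the buffer read(2) fills in for a counter opened with
 * PERF_FORMAT_ID and without PERF_FORMAT_GROUP, per the
 * perf_event_open(2) read_format ABI:
 *
 *	u64 value;
 *	u64 time_enabled;	(only with PERF_FORMAT_TOTAL_TIME_ENABLED)
 *	u64 time_running;	(only with PERF_FORMAT_TOTAL_TIME_RUNNING)
 *	u64 id;			(only with PERF_FORMAT_ID)
 *
 * perf_evlist__id_add_fd() walks the read_format bits to find the
 * index of the id field in this buffer.
 */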
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}
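
/*
 * A cpu map whose first entry is -1 is the dummy map that
 * cpu_map__dummy_new() creates for per-thread monitoring, so in that
 * case one buffer is mmaped per thread rather than per cpu.
 */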
int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
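
/*
 * Only the first event on each cpu (or thread) gets a ring buffer of
 * its own via __perf_evlist__mmap(); the fds of all other events are
 * redirected into that buffer with PERF_EVENT_IOC_SET_OUTPUT, so there
 * is a single stream to consume per cpu/thread instead of one per
 * event.
 */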
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}
/**
 * perf_evlist__mmap - Create per cpu maps to receive events
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1;
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	/* One extra page for the struct perf_event_mmap_page control header */
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
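
/*
 * A sketch of the consumption loop built on these helpers, with error
 * handling elided and deliver() standing in for the tool's event
 * handler:
 *
 *	union perf_event *event;
 *	int i;
 *
 *	while (poll(evlist->pollfd, evlist->nr_fds, -1) > 0) {
 *		for (i = 0; i < evlist->nr_mmaps; i++) {
 *			while ((event = perf_evlist__mmap_read(evlist, i)))
 *				deliver(event);
 *		}
 *	}
 */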
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (cpu_list == NULL && target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
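
/*
 * When 'group' is true the first event on the list acts as the group
 * leader: it is opened with a NULL group_fd, and every subsequent
 * event is opened passing the leader's fd array (first->fd), so that
 * perf_evsel__open() can hand the kernel the leader's fd for the
 * matching cpu/thread and the whole list is scheduled as one group.
 */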
int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
	struct perf_evsel *evsel, *first;
	int err, ncpus, nthreads;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		struct xyarray *group_fd = NULL;

		if (group && evsel != first)
			group_fd = first->fd;

		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
				       group, group_fd);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	return err;
}
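
/*
 * A sketch of how the builtin tools are expected to string these
 * helpers together (error handling elided):
 *
 *	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
 *
 *	perf_evlist__add_default(evlist);
 *	perf_evlist__create_maps(evlist, target_pid, target_tid, cpu_list);
 *	perf_evlist__open(evlist, group);
 *	perf_evlist__mmap(evlist, pages, false);
 *	... poll() and perf_evlist__mmap_read() ...
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__delete_maps(evlist);
 *	perf_evlist__delete(evlist);
 */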