/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <linux/bitops.h>
#include "event-parse.h"
#include "thread_map.h"
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include "perf_regs.h"
static struct {
	bool sample_id_all;
	bool exclude_guest;
} perf_missing_features;
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
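/*
 * FD() expands to an lvalue, so the same macro both reads and stores the
 * per-cpu, per-thread file descriptors kept in evsel->fd, e.g.:
 *
 *	FD(evsel, cpu, thread) = -1;
 *	if (FD(evsel, cpu, thread) < 0)
 *		...
 */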
static int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
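/*
 * Example: sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME
 * has three bits set inside PERF_SAMPLE_MASK, so the fixed portion of each
 * sample occupies 3 * sizeof(u64) = 24 bytes.
 */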
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
	}
}
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
	}
}
void perf_evsel__set_sample_id(struct perf_evsel *evsel)
{
	perf_evsel__set_sample_bit(evsel, ID);
	evsel->attr.read_format |= PERF_FORMAT_ID;
}
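/*
 * PERF_SAMPLE_ID stamps each sample with the event ID while PERF_FORMAT_ID
 * reports that same ID through read(), so samples can be matched back to
 * the evsel that produced them.
 */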
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
struct event_format *event_format__new(const char *sys, const char *name)
{
	int fd, n;
	char *filename;
	void *bf = NULL, *nbf;
	size_t size = 0, alloc_size = 0;
	struct event_format *format = NULL;

	if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out_free_filename;

	do {
		if (size == alloc_size) {
			alloc_size += BUFSIZ;
			nbf = realloc(bf, alloc_size);
			if (nbf == NULL)
				goto out_free_bf;
			bf = nbf;
		}

		n = read(fd, bf + size, BUFSIZ);
		if (n < 0)
			goto out_free_bf;
		size += n;
	} while (n > 0);

	pevent_parse_format(&format, bf, size, sys);

out_free_bf:
	free(bf);
	close(fd);
out_free_filename:
	free(filename);
out:
	return format;
}
struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = event_format__new(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	free(evsel->name);
	free(evsel);
	return NULL;
}
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};
static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod); }	\
	} while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
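/*
 * Example: an attr with only exclude_user set renders the suffix ":k";
 * adding precise_ip = 2 extends it to ":kpp" (values chosen only to
 * illustrate the modifier string this helper appends).
 */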
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};
static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}
static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}
static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};
const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};
const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
					[PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)
/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)] = (CACHE_READ),
 [C(BPU)]  = (CACHE_READ),
 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
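/*
 * Example: the L1I row above lacks CACHE_WRITE, so
 * perf_evsel__is_cache_op_valid(C(L1I), C(OP_WRITE)) below returns false:
 * stores to the L1 instruction cache are not a countable combination.
 */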
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
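/*
 * Example: (type, op, result) = (L1D, OP_READ, RESULT_MISS) formats as
 * "L1-dcache-load-misses"; with result == RESULT_ACCESS (0) it takes the
 * two-part branch and yields "L1-dcache-loads".
 */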
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type > PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op > PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
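/*
 * Layout of a PERF_TYPE_HW_CACHE config value, as decoded above:
 * bits 0-7 select the cache, bits 8-15 the op, bits 16-23 the result.
 * E.g. (C(RESULT_MISS) << 16) | (C(OP_READ) << 8) | C(L1D) names as
 * "L1-dcache-load-misses".
 */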
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}
/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to an already running traced process we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
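/*
 * Sketch of the resulting attr bits (assuming the perf record flow that
 * calls into this file): case 2) leaves a group leader with disabled = 1
 * and enable_on_exec = 1, while case 3) sets only disabled = 1 here and
 * relies on an explicit PERF_EVENT_IOC_ENABLE ioctl later.
 */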
void perf_evsel__config(struct perf_evsel *evsel,
			struct perf_record_opts *opts)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	/*
	 * We default some events to a 1 default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		perf_evsel__set_sample_bit(evsel, CALLCHAIN);

		if (opts->call_graph == CALLCHAIN_DWARF) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	if (!perf_missing_features.sample_id_all &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced executed by perf.
	 */
	if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
		attr->enable_on_exec = 1;
}
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);

			if (err)
				return err;
		}
	}

	return 0;
}
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	free(evsel->counts);
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);
	if (evsel->tp_format)
		pevent_free_format(evsel->tp_format);
	free(evsel->name);
	free(evsel);
}
static inline void compute_deltas(struct perf_evsel *evsel,
				  int cpu,
				  struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	compute_deltas(evsel, cpu, &count);

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;

	return 0;
}
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	compute_deltas(evsel, -1, aggr);

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto try_fallback;
			}
		}
	}

	return 0;

try_fallback:
	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	if (!perf_missing_features.exclude_guest &&
	    (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}
static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
*event
,
987 const void *offset
, u64 size
)
989 const void *base
= event
;
991 if (offset
+ size
> base
+ event
->header
.size
)
997 int perf_evsel__parse_sample(struct perf_evsel
*evsel
, union perf_event
*event
,
998 struct perf_sample
*data
)
1000 u64 type
= evsel
->attr
.sample_type
;
1001 u64 regs_user
= evsel
->attr
.sample_regs_user
;
1002 bool swapped
= evsel
->needs_swap
;
1006 * used for cross-endian analysis. See git commit 65014ab3
1007 * for why this goofiness is needed.
1011 memset(data
, 0, sizeof(*data
));
1012 data
->cpu
= data
->pid
= data
->tid
= -1;
1013 data
->stream_id
= data
->id
= data
->time
= -1ULL;
1016 if (event
->header
.type
!= PERF_RECORD_SAMPLE
) {
1017 if (!evsel
->attr
.sample_id_all
)
1019 return perf_evsel__parse_id_sample(evsel
, event
, data
);
1022 array
= event
->sample
.array
;
1024 if (evsel
->sample_size
+ sizeof(event
->header
) > event
->header
.size
)
1027 if (type
& PERF_SAMPLE_IP
) {
1028 data
->ip
= event
->ip
.ip
;
1032 if (type
& PERF_SAMPLE_TID
) {
1035 /* undo swap of u64, then swap on individual u32s */
1036 u
.val64
= bswap_64(u
.val64
);
1037 u
.val32
[0] = bswap_32(u
.val32
[0]);
1038 u
.val32
[1] = bswap_32(u
.val32
[1]);
1041 data
->pid
= u
.val32
[0];
1042 data
->tid
= u
.val32
[1];
1046 if (type
& PERF_SAMPLE_TIME
) {
1047 data
->time
= *array
;
1052 if (type
& PERF_SAMPLE_ADDR
) {
1053 data
->addr
= *array
;
1058 if (type
& PERF_SAMPLE_ID
) {
1063 if (type
& PERF_SAMPLE_STREAM_ID
) {
1064 data
->stream_id
= *array
;
1068 if (type
& PERF_SAMPLE_CPU
) {
1072 /* undo swap of u64, then swap on individual u32s */
1073 u
.val64
= bswap_64(u
.val64
);
1074 u
.val32
[0] = bswap_32(u
.val32
[0]);
1077 data
->cpu
= u
.val32
[0];
1081 if (type
& PERF_SAMPLE_PERIOD
) {
1082 data
->period
= *array
;
1086 if (type
& PERF_SAMPLE_READ
) {
1087 fprintf(stderr
, "PERF_SAMPLE_READ is unsupported for now\n");
1091 if (type
& PERF_SAMPLE_CALLCHAIN
) {
1092 if (sample_overlap(event
, array
, sizeof(data
->callchain
->nr
)))
1095 data
->callchain
= (struct ip_callchain
*)array
;
1097 if (sample_overlap(event
, array
, data
->callchain
->nr
))
1100 array
+= 1 + data
->callchain
->nr
;
1103 if (type
& PERF_SAMPLE_RAW
) {
1107 if (WARN_ONCE(swapped
,
1108 "Endianness of raw data not corrected!\n")) {
1109 /* undo swap of u64, then swap on individual u32s */
1110 u
.val64
= bswap_64(u
.val64
);
1111 u
.val32
[0] = bswap_32(u
.val32
[0]);
1112 u
.val32
[1] = bswap_32(u
.val32
[1]);
1115 if (sample_overlap(event
, array
, sizeof(u32
)))
1118 data
->raw_size
= u
.val32
[0];
1119 pdata
= (void *) array
+ sizeof(u32
);
1121 if (sample_overlap(event
, pdata
, data
->raw_size
))
1124 data
->raw_data
= (void *) pdata
;
1126 array
= (void *)array
+ data
->raw_size
+ sizeof(u32
);
1129 if (type
& PERF_SAMPLE_BRANCH_STACK
) {
1132 data
->branch_stack
= (struct branch_stack
*)array
;
1135 sz
= data
->branch_stack
->nr
* sizeof(struct branch_entry
);
1140 if (type
& PERF_SAMPLE_REGS_USER
) {
1141 /* First u64 tells us if we have any regs in sample. */
1142 u64 avail
= *array
++;
1145 data
->user_regs
.regs
= (u64
*)array
;
1146 array
+= hweight_long(regs_user
);
1150 if (type
& PERF_SAMPLE_STACK_USER
) {
1151 u64 size
= *array
++;
1153 data
->user_stack
.offset
= ((char *)(array
- 1)
1157 data
->user_stack
.size
= 0;
1159 data
->user_stack
.data
= (char *)array
;
1160 array
+= size
/ sizeof(*array
);
1161 data
->user_stack
.size
= *array
;
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
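/*
 * Usage sketch (the field name depends on the tracepoint's format file):
 *
 *	u64 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 *
 * pulls the "next_pid" integer out of a sched:sched_switch sample's raw
 * data, byte-swapping it when the recording's endianness differs.
 */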
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}
static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
	if (value == 0)
		return 0;

	return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}
#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
struct bit_names {
	int bit;
	const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
			 struct bit_names *bits, bool *first)
{
	int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
	bool first_bit = true;

	do {
		if (value & bits[i].bit) {
			printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);

	return printed;
}
static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "sample_type", value, bits, first);
}
static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "read_format", value, bits, first);
}
int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose || details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}

	if (details->verbose) {
		printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
		if (evsel->attr.read_format)
			printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
		if_print(exclusive);
		if_print(exclude_user);
		if_print(exclude_kernel);
		if_print(exclude_hv);
		if_print(exclude_idle);
		if_print(inherit_stat);
		if_print(enable_on_exec);
		if_print(watermark);
		if_print(precise_ip);
		if_print(mmap_data);
		if_print(sample_id_all);
		if_print(exclude_host);
		if_print(exclude_guest);
		if_print(__reserved_1);
		if_print(wakeup_events);
		if_print(branch_sample_type);
	}
out:
	fputc('\n', fp);
	return ++printed;
}
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		free(evsel->name);
		evsel->name = NULL;
		return true;
	}

	return false;
}
int perf_evsel__open_strerror(struct perf_evsel *evsel,
			      struct perf_target *target,
			      int err, char *msg, size_t size)
{
	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Try again after reducing the number of events.");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
			 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror(err), perf_evsel__name(evsel));
}