#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include "event-parse.h"
int perf_session__synthesize_sample(struct perf_session *session,
				    union perf_event *event,
				    const struct perf_sample *sample)
{
	return perf_event__synthesize_sample(event, perf_evlist__sample_type(session->evlist),
					     sample, session->header.needs_swap);
}
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}
void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	session->host_machine.id_hdr_size = id_hdr_size;
	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}
int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);

	return ret;
}
static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);
	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
	hists__init(&self->hists);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}
static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}
static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}
void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
}
void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}
static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}
static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
static void ip__resolve_ams(struct machine *self, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 addr)
{
	struct addr_location al;
	size_t i;
	u8 m;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {
		m = cpumodes[i];
		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest, or hypervisor.
		 * Branches may straddle the kernel/user/hypervisor boundaries.
		 * Thus, we have to try consecutively until we find a match
		 * or else the symbol is unknown.
		 */
		thread__find_addr_location(thread, self, m, MAP__FUNCTION,
					   addr, &al, NULL);
		if (al.sym)
			break;
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
struct branch_info *machine__resolve_bstack(struct machine *self,
					    struct thread *thr,
					    struct branch_stack *bs)
{
	struct branch_info *bi;
	unsigned int i;

	bi = calloc(bs->nr, sizeof(struct branch_info));
	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}
int machine__resolve_callchain(struct machine *self,
			       struct thread *thread,
			       struct ip_callchain *chain,
			       struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}
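/*
 * Illustrative note (not part of the original file): PERF_CONTEXT_* markers
 * are interleaved with real addresses in chain->ips[] and switch the cpumode
 * used for the entries that follow them. For example, in callee order
 *
 *	ips[] = { PERF_CONTEXT_KERNEL, k1, k2, PERF_CONTEXT_USER, u1 }
 *
 * resolves k1 and k2 with PERF_RECORD_MISC_KERNEL and u1 with
 * PERF_RECORD_MISC_USER; the marker entries themselves are skipped and are
 * never appended to the callchain cursor.
 */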
static int process_event_synth_tracing_data_stub(union perf_event *event __used,
						 struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __used,
					 struct perf_evlist **pevlist __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __used,
				     union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __used,
			      union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __used,
				       union perf_event *event __used,
				       struct perf_session *perf_session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __used,
				   union perf_event *event __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);
static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;

	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}
static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __used)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid   = bswap_32(event->mmap.pid);
	event->mmap.tid   = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len   = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid  = bswap_32(event->fork.pid);
	event->fork.tid  = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid          = bswap_32(event->read.pid);
	event->read.tid          = bswap_32(event->read.tid);
	event->read.value        = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id           = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);

	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}
/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and to carry perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though this
 * seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
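/*
 * Illustrative sketch (not part of the original file): revbyte() mirrors the
 * bit order within one byte, e.g. 0xb1 (1011 0001) becomes 0x8d (1000 1101),
 * and applying swap_bitfield() twice restores the original bytes. The
 * SWAP_BITFIELD_SELFTEST guard and the helper below are hypothetical.
 */
#ifdef SWAP_BITFIELD_SELFTEST
static void swap_bitfield_selftest(void)
{
	u8 buf[2] = { 0xb1, 0x01 };	/* 1011 0001, 0000 0001 */

	swap_bitfield(buf, sizeof(buf));
	BUG_ON(buf[0] != 0x8d || buf[1] != 0x80);	/* bit-reversed bytes */

	swap_bitfield(buf, sizeof(buf));
	BUG_ON(buf[0] != 0xb1 || buf[1] != 0x01);	/* reversal is an involution */
}
#endif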
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type          = bswap_32(attr->type);
	attr->size          = bswap_32(attr->size);
	attr->config        = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type   = bswap_64(attr->sample_type);
	attr->read_format   = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type       = bswap_32(attr->bp_type);
	attr->bp_addr       = bswap_64(attr->bp_addr);
	attr->bp_len        = bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}
static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __used)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}
static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __used)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}
static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __used)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}
typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};
static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);
static void flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample,
						s->header.needs_swap);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, tool,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}
/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will be all
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __used,
				  struct perf_session *session)
{
	flush_sample_queue(session, tool);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}
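/*
 * Illustrative sketch (not part of the original file): the flush limit always
 * lags one round behind. Each FINISHED_ROUND first flushes up to the limit
 * recorded at the previous round and only then advances the limit to this
 * round's maximum timestamp, exactly as process_finished_round() does above.
 * With the timestamps from the comment, the round that records max 7 flushes
 * everything up to 4, and the following round flushes everything up to 7.
 * The FINISHED_ROUND_EXAMPLE guard and the toy type below are hypothetical.
 */
#ifdef FINISHED_ROUND_EXAMPLE
struct toy_ordered_samples {
	u64 next_flush;		/* everything <= this may be flushed now */
	u64 max_timestamp;	/* max timestamp queued in this round	 */
};

static u64 toy_finished_round(struct toy_ordered_samples *os)
{
	u64 flushed_up_to = os->next_flush;	/* limit set by the previous round */

	os->next_flush = os->max_timestamp;	/* limit for the next round	   */
	return flushed_up_to;
}
#endif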
/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}
static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}
static void branch_stack__printf(struct perf_sample *sample)
{
	u64 i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}
static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = perf_evlist__sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}
static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}
static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = perf_evlist__sample_type(session->evlist);

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);
}
static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = event->ip.pid;

		return perf_session__findnew_machine(session, pid);
	}

	return perf_session__find_host_machine(session);
}
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc. KISS for
		 * now.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->hists.stats.nr_unprocessable_samples;
			return 0;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}
static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}

	return 0;
}
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					     struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}
static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}
static int perf_session__process_event(struct perf_session *session,
					union perf_event *event,
					struct perf_tool *tool,
					u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample,
					session->header.needs_swap);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}
void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->hists.stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->hists.stats.nr_unprocessable_samples);
	}
}
#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		event = buf;
		cur_size = size;
	}

	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, tool);
out_err:
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}
int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*++bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}
size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->host_machine, fp);
}
void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
	/*
	 * FIXME: This one makes no sense, we need to remove the thread from
	 * the machine it belongs to, perf_session can have many machines, so
	 * doing it always on ->host_machine is wrong. Fix when auditing all
	 * the 'perf kvm' code.
	 */
	machine__remove_thread(&session->host_machine, th);
}
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}

	return NULL;
}
void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
			  struct machine *machine, int print_sym,
			  int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, al.thread,
					       sample->callchain, NULL) != 0) {
			error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (1) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evlist *evlist = session->evlist;
	struct event_format *format;
	struct perf_evsel *evsel;
	char *tracepoint, *name;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		err = -ENOMEM;
		tracepoint = strdup(assocs[i].name);
		if (tracepoint == NULL)
			goto out;

		err = -ENOENT;
		name = strchr(tracepoint, ':');
		if (name == NULL)
			goto out_free;

		*name++ = '\0';
		format = pevent_find_event_by_name(session->pevent,
						   tracepoint, name);
		if (format == NULL) {
			/*
			 * Adding a handler for an event not in the session,
			 * just ignore it.
			 */
			goto next;
		}

		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
		if (evsel == NULL)
			goto next;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out_free;
		evsel->handler.func = assocs[i].handler;
next:
		free(tracepoint);
	}

	err = 0;
out:
	return err;

out_free:
	free(tracepoint);
	goto out;
}