#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"
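/*
 * Open the perf.data file named in self->filename (or read from stdin when
 * the name is "-"), sanity-check its ownership and size, and read its header.
 */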
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;
	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}
	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		return -err;
	}
	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;
	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}
	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}
void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}
int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}
static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}
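/*
 * Allocate and initialize a session for the given data file.  In O_RDONLY
 * mode the file is opened and its header read right away; in O_WRONLY mode
 * the kernel maps are created up front instead (see the comment below).
 */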
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}
static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}
static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}
void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}
void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}
static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}
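/*
 * Resolve each entry of a PERF_SAMPLE_CALLCHAIN ip array to a map/symbol
 * pair, switching cpumode whenever a PERF_CONTEXT_* marker is hit, and
 * record the first symbol matching parent_regex in *parent.
 */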
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, thread->pid, ip,
					   &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}
static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);
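/*
 * Point every handler the caller left NULL at a stub, so that
 * perf_session__process_event() can call them unconditionally.
 */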
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}
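/*
 * The data file may have been recorded on a machine of the opposite byte
 * order; when header.needs_swap is set, the event__*_swap() helpers below
 * byte-swap each record type in place before it is processed.
 */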
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}
static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}
static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}
static void event__mmap_swap(event_t *self)
{
	self->mmap.pid   = bswap_32(self->mmap.pid);
	self->mmap.tid   = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len   = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}
static void event__task_swap(event_t *self)
{
	self->fork.pid  = bswap_32(self->fork.pid);
	self->fork.tid  = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}
static void event__read_swap(event_t *self)
{
	self->read.pid          = bswap_32(self->read.pid);
	self->read.tid          = bswap_32(self->read.tid);
	self->read.value        = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id           = bswap_64(self->read.id);
}
static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type          = bswap_32(self->attr.attr.type);
	self->attr.attr.size          = bswap_32(self->attr.attr.size);
	self->attr.attr.config        = bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type   = bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format   = bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type       = bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr       = bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len        = bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}
static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}
static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}
typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]                = event__mmap_swap,
	[PERF_RECORD_COMM]                = event__comm_swap,
	[PERF_RECORD_FORK]                = event__task_swap,
	[PERF_RECORD_EXIT]                = event__task_swap,
	[PERF_RECORD_LOST]                = event__all64_swap,
	[PERF_RECORD_READ]                = event__read_swap,
	[PERF_RECORD_SAMPLE]              = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]         = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]     = NULL,
	[PERF_RECORD_HEADER_MAX]          = NULL,
};
struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};
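/*
 * Deliver every queued sample with a timestamp at or below
 * ordered_samples.next_flush to the real ->sample handler, in timestamp
 * order, and drop it from the queue.
 */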
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}
/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}
static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}
static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}
static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}

	list_add_tail(&new->list, head);
}
/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. Having a sorting based on a list and
	 * on the last inserted event that is close to the current one is
	 * probably more efficient than an rbtree based sorting.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}
static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}
static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	queue_sample_event(event, &data, s);

	return 0;
}
static int perf_session__process_event(struct perf_session *self,
					event_t *event,
					struct perf_event_ops *ops,
					u64 offset, u64 head)
{
	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}
void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}
#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}
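/*
 * mmap() the file in self->mmap_window page chunks and walk the events in
 * place, sliding the window forward whenever the next event would cross the
 * end of the current mapping.
 */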
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}
*self
,
859 struct perf_event_ops
*ops
)
863 if (perf_session__register_idle_thread(self
) == NULL
)
867 err
= __perf_session__process_events(self
,
868 self
->header
.data_offset
,
869 self
->header
.data_size
,
872 err
= __perf_session__process_pipe_events(self
, ops
);
bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}
size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);

	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}
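/*
 * A minimal usage sketch of the session API above, roughly as a perf
 * built-in would drive it.  The handler names in my_ops are placeholders
 * for whatever callbacks the tool actually provides; they are not defined
 * in this file.
 *
 *	static struct perf_event_ops my_ops = {
 *		.sample		 = my_process_sample,
 *		.comm		 = my_process_comm,
 *		.ordered_samples = true,
 *	};
 *
 *	struct perf_session *session;
 *	int err;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (session == NULL)
 *		return -ENOMEM;
 *
 *	err = perf_session__process_events(session, &my_ops);
 *	perf_session__delete(session);
 *	return err;
 */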