4 * Builtin report command: Analyze the perf.data input file,
5 * look up and read DSOs and symbol information and display
6 * a histogram of results, along various sorting keys.
10 #include "util/util.h"
12 #include "util/color.h"
13 #include <linux/list.h>
14 #include "util/cache.h"
15 #include <linux/rbtree.h>
16 #include "util/symbol.h"
17 #include "util/string.h"
18 #include "util/callchain.h"
19 #include "util/strlist.h"
20 #include "util/values.h"
23 #include "util/header.h"
25 #include "util/parse-options.h"
26 #include "util/parse-events.h"
32 static char const *input_name
= "perf.data";
34 static char default_sort_order
[] = "comm,dso,symbol";
35 static char *sort_order
= default_sort_order
;
36 static char *dso_list_str
, *comm_list_str
, *sym_list_str
,
38 static struct strlist
*dso_list
, *comm_list
, *sym_list
;
39 static char *field_sep
;
42 static int show_mask
= SHOW_KERNEL
| SHOW_USER
| SHOW_HV
;
44 static int dump_trace
= 0;
45 #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
46 #define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
48 static int full_paths
;
49 static int show_nr_samples
;
51 static int show_threads
;
52 static struct perf_read_values show_threads_values
;
54 static char default_pretty_printing_style
[] = "normal";
55 static char *pretty_printing_style
= default_pretty_printing_style
;
57 static unsigned long page_size
;
58 static unsigned long mmap_window
= 32;
60 static char default_parent_pattern
[] = "^sys_|^do_page_fault";
61 static char *parent_pattern
= default_parent_pattern
;
62 static regex_t parent_regex
;
64 static int exclude_other
= 1;
66 static char callchain_default_opt
[] = "fractal,0.5";
71 struct callchain_param callchain_param
= {
72 .mode
= CHAIN_GRAPH_REL
,
76 static u64 sample_type
;
78 static int repsep_fprintf(FILE *fp
, const char *fmt
, ...)
85 n
= vfprintf(fp
, fmt
, ap
);
88 n
= vasprintf(&bf
, fmt
, ap
);
92 sep
= strchr(sep
, *field_sep
);
107 static char __cwd
[PATH_MAX
];
108 static char *cwd
= __cwd
;
111 static int strcommon(const char *pathname
)
115 while (n
< cwdlen
&& pathname
[n
] == cwd
[n
])
122 struct list_head node
;
126 u64 (*map_ip
)(struct map
*, u64
);
130 static u64
map__map_ip(struct map
*map
, u64 ip
)
132 return ip
- map
->start
+ map
->pgoff
;
135 static u64
vdso__map_ip(struct map
*map __used
, u64 ip
)
/*
 * The kernel reports anonymous mappings with the pseudo-filename
 * "//anon"; returns non-zero when @filename names such a mapping.
 */
static inline int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon");
}
145 static struct map
*map__new(struct mmap_event
*event
)
147 struct map
*self
= malloc(sizeof(*self
));
150 const char *filename
= event
->filename
;
151 char newfilename
[PATH_MAX
];
155 int n
= strcommon(filename
);
158 snprintf(newfilename
, sizeof(newfilename
),
159 ".%s", filename
+ n
);
160 filename
= newfilename
;
164 anon
= is_anon_memory(filename
);
167 snprintf(newfilename
, sizeof(newfilename
), "/tmp/perf-%d.map", event
->pid
);
168 filename
= newfilename
;
171 self
->start
= event
->start
;
172 self
->end
= event
->start
+ event
->len
;
173 self
->pgoff
= event
->pgoff
;
175 self
->dso
= dsos__findnew(filename
);
176 if (self
->dso
== NULL
)
179 if (self
->dso
== vdso
|| anon
)
180 self
->map_ip
= vdso__map_ip
;
182 self
->map_ip
= map__map_ip
;
190 static struct map
*map__clone(struct map
*self
)
192 struct map
*map
= malloc(sizeof(*self
));
197 memcpy(map
, self
, sizeof(*self
));
202 static int map__overlap(struct map
*l
, struct map
*r
)
204 if (l
->start
> r
->start
) {
210 if (l
->end
> r
->start
)
216 static size_t map__fprintf(struct map
*self
, FILE *fp
)
218 return fprintf(fp
, " %Lx-%Lx %Lx %s\n",
219 self
->start
, self
->end
, self
->pgoff
, self
->dso
->name
);
224 struct rb_node rb_node
;
225 struct list_head maps
;
230 static struct thread
*thread__new(pid_t pid
)
232 struct thread
*self
= malloc(sizeof(*self
));
236 self
->comm
= malloc(32);
238 snprintf(self
->comm
, 32, ":%d", self
->pid
);
239 INIT_LIST_HEAD(&self
->maps
);
245 static unsigned int dsos__col_width
,
249 static int thread__set_comm(struct thread
*self
, const char *comm
)
253 self
->comm
= strdup(comm
);
257 if (!col_width_list_str
&& !field_sep
&&
258 (!comm_list
|| strlist__has_entry(comm_list
, comm
))) {
259 unsigned int slen
= strlen(comm
);
260 if (slen
> comms__col_width
) {
261 comms__col_width
= slen
;
262 threads__col_width
= slen
+ 6;
269 static size_t thread__fprintf(struct thread
*self
, FILE *fp
)
272 size_t ret
= fprintf(fp
, "Thread %d %s\n", self
->pid
, self
->comm
);
274 list_for_each_entry(pos
, &self
->maps
, node
)
275 ret
+= map__fprintf(pos
, fp
);
281 static struct rb_root threads
;
282 static struct thread
*last_match
;
284 static struct thread
*threads__findnew(pid_t pid
)
286 struct rb_node
**p
= &threads
.rb_node
;
287 struct rb_node
*parent
= NULL
;
291 * Front-end cache - PID lookups come in blocks,
292 * so most of the time we don't have to look up
295 if (last_match
&& last_match
->pid
== pid
)
300 th
= rb_entry(parent
, struct thread
, rb_node
);
302 if (th
->pid
== pid
) {
313 th
= thread__new(pid
);
315 rb_link_node(&th
->rb_node
, parent
, p
);
316 rb_insert_color(&th
->rb_node
, &threads
);
323 static void thread__insert_map(struct thread
*self
, struct map
*map
)
325 struct map
*pos
, *tmp
;
327 list_for_each_entry_safe(pos
, tmp
, &self
->maps
, node
) {
328 if (map__overlap(pos
, map
)) {
330 printf("overlapping maps:\n");
331 map__fprintf(map
, stdout
);
332 map__fprintf(pos
, stdout
);
335 if (map
->start
<= pos
->start
&& map
->end
> pos
->start
)
336 pos
->start
= map
->end
;
338 if (map
->end
>= pos
->end
&& map
->start
< pos
->end
)
339 pos
->end
= map
->start
;
342 printf("after collision:\n");
343 map__fprintf(pos
, stdout
);
346 if (pos
->start
>= pos
->end
) {
347 list_del_init(&pos
->node
);
353 list_add_tail(&map
->node
, &self
->maps
);
356 static int thread__fork(struct thread
*self
, struct thread
*parent
)
362 self
->comm
= strdup(parent
->comm
);
366 list_for_each_entry(map
, &parent
->maps
, node
) {
367 struct map
*new = map__clone(map
);
370 thread__insert_map(self
, new);
376 static struct map
*thread__find_map(struct thread
*self
, u64 ip
)
383 list_for_each_entry(pos
, &self
->maps
, node
)
384 if (ip
>= pos
->start
&& ip
<= pos
->end
)
390 static size_t threads__fprintf(FILE *fp
)
395 for (nd
= rb_first(&threads
); nd
; nd
= rb_next(nd
)) {
396 struct thread
*pos
= rb_entry(nd
, struct thread
, rb_node
);
398 ret
+= thread__fprintf(pos
, fp
);
405 * histogram, sorted on item, collects counts
408 static struct rb_root hist
;
411 struct rb_node rb_node
;
413 struct thread
*thread
;
417 struct symbol
*parent
;
420 struct callchain_node callchain
;
421 struct rb_root sorted_chain
;
427 * configurable sorting bits
431 struct list_head list
;
435 int64_t (*cmp
)(struct hist_entry
*, struct hist_entry
*);
436 int64_t (*collapse
)(struct hist_entry
*, struct hist_entry
*);
437 size_t (*print
)(FILE *fp
, struct hist_entry
*, unsigned int width
);
442 static int64_t cmp_null(void *l
, void *r
)
455 sort__thread_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
457 return right
->thread
->pid
- left
->thread
->pid
;
461 sort__thread_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
463 return repsep_fprintf(fp
, "%*s:%5d", width
- 6,
464 self
->thread
->comm
?: "", self
->thread
->pid
);
467 static struct sort_entry sort_thread
= {
468 .header
= "Command: Pid",
469 .cmp
= sort__thread_cmp
,
470 .print
= sort__thread_print
,
471 .width
= &threads__col_width
,
477 sort__comm_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
479 return right
->thread
->pid
- left
->thread
->pid
;
483 sort__comm_collapse(struct hist_entry
*left
, struct hist_entry
*right
)
485 char *comm_l
= left
->thread
->comm
;
486 char *comm_r
= right
->thread
->comm
;
488 if (!comm_l
|| !comm_r
)
489 return cmp_null(comm_l
, comm_r
);
491 return strcmp(comm_l
, comm_r
);
495 sort__comm_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
497 return repsep_fprintf(fp
, "%*s", width
, self
->thread
->comm
);
500 static struct sort_entry sort_comm
= {
502 .cmp
= sort__comm_cmp
,
503 .collapse
= sort__comm_collapse
,
504 .print
= sort__comm_print
,
505 .width
= &comms__col_width
,
511 sort__dso_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
513 struct dso
*dso_l
= left
->dso
;
514 struct dso
*dso_r
= right
->dso
;
516 if (!dso_l
|| !dso_r
)
517 return cmp_null(dso_l
, dso_r
);
519 return strcmp(dso_l
->name
, dso_r
->name
);
523 sort__dso_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
526 return repsep_fprintf(fp
, "%-*s", width
, self
->dso
->name
);
528 return repsep_fprintf(fp
, "%*llx", width
, (u64
)self
->ip
);
531 static struct sort_entry sort_dso
= {
532 .header
= "Shared Object",
533 .cmp
= sort__dso_cmp
,
534 .print
= sort__dso_print
,
535 .width
= &dsos__col_width
,
541 sort__sym_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
545 if (left
->sym
== right
->sym
)
548 ip_l
= left
->sym
? left
->sym
->start
: left
->ip
;
549 ip_r
= right
->sym
? right
->sym
->start
: right
->ip
;
551 return (int64_t)(ip_r
- ip_l
);
555 sort__sym_print(FILE *fp
, struct hist_entry
*self
, unsigned int width __used
)
560 ret
+= repsep_fprintf(fp
, "%#018llx %c ", (u64
)self
->ip
,
561 dso__symtab_origin(self
->dso
));
563 ret
+= repsep_fprintf(fp
, "[%c] ", self
->level
);
565 ret
+= repsep_fprintf(fp
, "%s", self
->sym
->name
);
567 if (self
->sym
->module
)
568 ret
+= repsep_fprintf(fp
, "\t[%s]",
569 self
->sym
->module
->name
);
571 ret
+= repsep_fprintf(fp
, "%#016llx", (u64
)self
->ip
);
577 static struct sort_entry sort_sym
= {
579 .cmp
= sort__sym_cmp
,
580 .print
= sort__sym_print
,
586 sort__parent_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
588 struct symbol
*sym_l
= left
->parent
;
589 struct symbol
*sym_r
= right
->parent
;
591 if (!sym_l
|| !sym_r
)
592 return cmp_null(sym_l
, sym_r
);
594 return strcmp(sym_l
->name
, sym_r
->name
);
598 sort__parent_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
600 return repsep_fprintf(fp
, "%-*s", width
,
601 self
->parent
? self
->parent
->name
: "[other]");
604 static unsigned int parent_symbol__col_width
;
606 static struct sort_entry sort_parent
= {
607 .header
= "Parent symbol",
608 .cmp
= sort__parent_cmp
,
609 .print
= sort__parent_print
,
610 .width
= &parent_symbol__col_width
,
613 static int sort__need_collapse
= 0;
614 static int sort__has_parent
= 0;
616 struct sort_dimension
{
618 struct sort_entry
*entry
;
622 static struct sort_dimension sort_dimensions
[] = {
623 { .name
= "pid", .entry
= &sort_thread
, },
624 { .name
= "comm", .entry
= &sort_comm
, },
625 { .name
= "dso", .entry
= &sort_dso
, },
626 { .name
= "symbol", .entry
= &sort_sym
, },
627 { .name
= "parent", .entry
= &sort_parent
, },
630 static LIST_HEAD(hist_entry__sort_list
);
632 static int sort_dimension__add(char *tok
)
636 for (i
= 0; i
< ARRAY_SIZE(sort_dimensions
); i
++) {
637 struct sort_dimension
*sd
= &sort_dimensions
[i
];
642 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
645 if (sd
->entry
->collapse
)
646 sort__need_collapse
= 1;
648 if (sd
->entry
== &sort_parent
) {
649 int ret
= regcomp(&parent_regex
, parent_pattern
, REG_EXTENDED
);
653 regerror(ret
, &parent_regex
, err
, sizeof(err
));
654 fprintf(stderr
, "Invalid regex: %s\n%s",
655 parent_pattern
, err
);
658 sort__has_parent
= 1;
661 list_add_tail(&sd
->entry
->list
, &hist_entry__sort_list
);
671 hist_entry__cmp(struct hist_entry
*left
, struct hist_entry
*right
)
673 struct sort_entry
*se
;
676 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
677 cmp
= se
->cmp(left
, right
);
686 hist_entry__collapse(struct hist_entry
*left
, struct hist_entry
*right
)
688 struct sort_entry
*se
;
691 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
692 int64_t (*f
)(struct hist_entry
*, struct hist_entry
*);
694 f
= se
->collapse
?: se
->cmp
;
696 cmp
= f(left
, right
);
704 static size_t ipchain__fprintf_graph_line(FILE *fp
, int depth
, int depth_mask
)
709 ret
+= fprintf(fp
, "%s", " ");
711 for (i
= 0; i
< depth
; i
++)
712 if (depth_mask
& (1 << i
))
713 ret
+= fprintf(fp
, "| ");
715 ret
+= fprintf(fp
, " ");
717 ret
+= fprintf(fp
, "\n");
722 ipchain__fprintf_graph(FILE *fp
, struct callchain_list
*chain
, int depth
,
723 int depth_mask
, int count
, u64 total_samples
,
729 ret
+= fprintf(fp
, "%s", " ");
730 for (i
= 0; i
< depth
; i
++) {
731 if (depth_mask
& (1 << i
))
732 ret
+= fprintf(fp
, "|");
734 ret
+= fprintf(fp
, " ");
735 if (!count
&& i
== depth
- 1) {
738 percent
= hits
* 100.0 / total_samples
;
739 ret
+= percent_color_fprintf(fp
, "--%2.2f%%-- ", percent
);
741 ret
+= fprintf(fp
, "%s", " ");
744 ret
+= fprintf(fp
, "%s\n", chain
->sym
->name
);
746 ret
+= fprintf(fp
, "%p\n", (void *)(long)chain
->ip
);
751 static struct symbol
*rem_sq_bracket
;
752 static struct callchain_list rem_hits
;
754 static void init_rem_hits(void)
756 rem_sq_bracket
= malloc(sizeof(*rem_sq_bracket
) + 6);
757 if (!rem_sq_bracket
) {
758 fprintf(stderr
, "Not enough memory to display remaining hits\n");
762 strcpy(rem_sq_bracket
->name
, "[...]");
763 rem_hits
.sym
= rem_sq_bracket
;
767 callchain__fprintf_graph(FILE *fp
, struct callchain_node
*self
,
768 u64 total_samples
, int depth
, int depth_mask
)
770 struct rb_node
*node
, *next
;
771 struct callchain_node
*child
;
772 struct callchain_list
*chain
;
773 int new_depth_mask
= depth_mask
;
779 if (callchain_param
.mode
== CHAIN_GRAPH_REL
)
780 new_total
= self
->children_hit
;
782 new_total
= total_samples
;
784 remaining
= new_total
;
786 node
= rb_first(&self
->rb_root
);
790 child
= rb_entry(node
, struct callchain_node
, rb_node
);
791 cumul
= cumul_hits(child
);
795 * The depth mask manages the output of pipes that show
796 * the depth. We don't want to keep the pipes of the current
797 * level for the last child of this depth.
798 * Except if we have remaining filtered hits. They will
799 * supersede the last child
801 next
= rb_next(node
);
802 if (!next
&& (callchain_param
.mode
!= CHAIN_GRAPH_REL
|| !remaining
))
803 new_depth_mask
&= ~(1 << (depth
- 1));
806 * But we keep the older depth mask for the line separator
807 * to keep the level link until we reach the last child
809 ret
+= ipchain__fprintf_graph_line(fp
, depth
, depth_mask
);
811 list_for_each_entry(chain
, &child
->val
, list
) {
812 if (chain
->ip
>= PERF_CONTEXT_MAX
)
814 ret
+= ipchain__fprintf_graph(fp
, chain
, depth
,
819 ret
+= callchain__fprintf_graph(fp
, child
, new_total
,
821 new_depth_mask
| (1 << depth
));
825 if (callchain_param
.mode
== CHAIN_GRAPH_REL
&&
826 remaining
&& remaining
!= new_total
) {
831 new_depth_mask
&= ~(1 << (depth
- 1));
833 ret
+= ipchain__fprintf_graph(fp
, &rem_hits
, depth
,
834 new_depth_mask
, 0, new_total
,
842 callchain__fprintf_flat(FILE *fp
, struct callchain_node
*self
,
845 struct callchain_list
*chain
;
851 ret
+= callchain__fprintf_flat(fp
, self
->parent
, total_samples
);
854 list_for_each_entry(chain
, &self
->val
, list
) {
855 if (chain
->ip
>= PERF_CONTEXT_MAX
)
858 ret
+= fprintf(fp
, " %s\n", chain
->sym
->name
);
860 ret
+= fprintf(fp
, " %p\n",
861 (void *)(long)chain
->ip
);
868 hist_entry_callchain__fprintf(FILE *fp
, struct hist_entry
*self
,
871 struct rb_node
*rb_node
;
872 struct callchain_node
*chain
;
875 rb_node
= rb_first(&self
->sorted_chain
);
879 chain
= rb_entry(rb_node
, struct callchain_node
, rb_node
);
880 percent
= chain
->hit
* 100.0 / total_samples
;
881 switch (callchain_param
.mode
) {
883 ret
+= percent_color_fprintf(fp
, " %6.2f%%\n",
885 ret
+= callchain__fprintf_flat(fp
, chain
, total_samples
);
887 case CHAIN_GRAPH_ABS
: /* Falldown */
888 case CHAIN_GRAPH_REL
:
889 ret
+= callchain__fprintf_graph(fp
, chain
,
890 total_samples
, 1, 1);
894 ret
+= fprintf(fp
, "\n");
895 rb_node
= rb_next(rb_node
);
903 hist_entry__fprintf(FILE *fp
, struct hist_entry
*self
, u64 total_samples
)
905 struct sort_entry
*se
;
908 if (exclude_other
&& !self
->parent
)
912 ret
= percent_color_fprintf(fp
,
913 field_sep
? "%.2f" : " %6.2f%%",
914 (self
->count
* 100.0) / total_samples
);
916 ret
= fprintf(fp
, field_sep
? "%lld" : "%12lld ", self
->count
);
918 if (show_nr_samples
) {
920 fprintf(fp
, "%c%lld", *field_sep
, self
->count
);
922 fprintf(fp
, "%11lld", self
->count
);
925 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
929 fprintf(fp
, "%s", field_sep
?: " ");
930 ret
+= se
->print(fp
, self
, se
->width
? *se
->width
: 0);
933 ret
+= fprintf(fp
, "\n");
936 hist_entry_callchain__fprintf(fp
, self
, total_samples
);
945 static void dso__calc_col_width(struct dso
*self
)
947 if (!col_width_list_str
&& !field_sep
&&
948 (!dso_list
|| strlist__has_entry(dso_list
, self
->name
))) {
949 unsigned int slen
= strlen(self
->name
);
950 if (slen
> dsos__col_width
)
951 dsos__col_width
= slen
;
954 self
->slen_calculated
= 1;
957 static struct symbol
*
958 resolve_symbol(struct thread
*thread
, struct map
**mapp
,
959 struct dso
**dsop
, u64
*ipp
)
961 struct dso
*dso
= dsop
? *dsop
: NULL
;
962 struct map
*map
= mapp
? *mapp
: NULL
;
974 map
= thread__find_map(thread
, ip
);
977 * We have to do this here as we may have a dso
978 * with no symbol hit that has a name longer than
979 * the ones with symbols sampled.
981 if (!sort_dso
.elide
&& !map
->dso
->slen_calculated
)
982 dso__calc_col_width(map
->dso
);
987 ip
= map
->map_ip(map
, ip
);
992 * If this is outside of all known maps,
993 * and is a negative address, try to look it
994 * up in the kernel dso, as it might be a
995 * vsyscall (which executes in user-mode):
997 if ((long long)ip
< 0)
1000 dprintf(" ...... dso: %s\n", dso
? dso
->name
: "<not found>");
1001 dprintf(" ...... map: %Lx -> %Lx\n", *ipp
, ip
);
1010 return dso
->find_symbol(dso
, ip
);
1013 static int call__match(struct symbol
*sym
)
1015 if (sym
->name
&& !regexec(&parent_regex
, sym
->name
, 0, NULL
, 0))
1021 static struct symbol
**
1022 resolve_callchain(struct thread
*thread
, struct map
*map __used
,
1023 struct ip_callchain
*chain
, struct hist_entry
*entry
)
1025 u64 context
= PERF_CONTEXT_MAX
;
1026 struct symbol
**syms
= NULL
;
1030 syms
= calloc(chain
->nr
, sizeof(*syms
));
1032 fprintf(stderr
, "Can't allocate memory for symbols\n");
1037 for (i
= 0; i
< chain
->nr
; i
++) {
1038 u64 ip
= chain
->ips
[i
];
1039 struct dso
*dso
= NULL
;
1042 if (ip
>= PERF_CONTEXT_MAX
) {
1048 case PERF_CONTEXT_HV
:
1049 dso
= hypervisor_dso
;
1051 case PERF_CONTEXT_KERNEL
:
1058 sym
= resolve_symbol(thread
, NULL
, &dso
, &ip
);
1061 if (sort__has_parent
&& call__match(sym
) &&
1063 entry
->parent
= sym
;
1074 * collect histogram counts
1078 hist_entry__add(struct thread
*thread
, struct map
*map
, struct dso
*dso
,
1079 struct symbol
*sym
, u64 ip
, struct ip_callchain
*chain
,
1080 char level
, u64 count
)
1082 struct rb_node
**p
= &hist
.rb_node
;
1083 struct rb_node
*parent
= NULL
;
1084 struct hist_entry
*he
;
1085 struct symbol
**syms
= NULL
;
1086 struct hist_entry entry
= {
1095 .sorted_chain
= RB_ROOT
1099 if ((sort__has_parent
|| callchain
) && chain
)
1100 syms
= resolve_callchain(thread
, map
, chain
, &entry
);
1102 while (*p
!= NULL
) {
1104 he
= rb_entry(parent
, struct hist_entry
, rb_node
);
1106 cmp
= hist_entry__cmp(&entry
, he
);
1111 append_chain(&he
->callchain
, chain
, syms
);
1120 p
= &(*p
)->rb_right
;
1123 he
= malloc(sizeof(*he
));
1128 callchain_init(&he
->callchain
);
1129 append_chain(&he
->callchain
, chain
, syms
);
1132 rb_link_node(&he
->rb_node
, parent
, p
);
1133 rb_insert_color(&he
->rb_node
, &hist
);
1138 static void hist_entry__free(struct hist_entry
*he
)
1144 * collapse the histogram
1147 static struct rb_root collapse_hists
;
1149 static void collapse__insert_entry(struct hist_entry
*he
)
1151 struct rb_node
**p
= &collapse_hists
.rb_node
;
1152 struct rb_node
*parent
= NULL
;
1153 struct hist_entry
*iter
;
1156 while (*p
!= NULL
) {
1158 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1160 cmp
= hist_entry__collapse(iter
, he
);
1163 iter
->count
+= he
->count
;
1164 hist_entry__free(he
);
1171 p
= &(*p
)->rb_right
;
1174 rb_link_node(&he
->rb_node
, parent
, p
);
1175 rb_insert_color(&he
->rb_node
, &collapse_hists
);
1178 static void collapse__resort(void)
1180 struct rb_node
*next
;
1181 struct hist_entry
*n
;
1183 if (!sort__need_collapse
)
1186 next
= rb_first(&hist
);
1188 n
= rb_entry(next
, struct hist_entry
, rb_node
);
1189 next
= rb_next(&n
->rb_node
);
1191 rb_erase(&n
->rb_node
, &hist
);
1192 collapse__insert_entry(n
);
1197 * reverse the map, sort on count.
1200 static struct rb_root output_hists
;
1202 static void output__insert_entry(struct hist_entry
*he
, u64 min_callchain_hits
)
1204 struct rb_node
**p
= &output_hists
.rb_node
;
1205 struct rb_node
*parent
= NULL
;
1206 struct hist_entry
*iter
;
1209 callchain_param
.sort(&he
->sorted_chain
, &he
->callchain
,
1210 min_callchain_hits
, &callchain_param
);
1212 while (*p
!= NULL
) {
1214 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1216 if (he
->count
> iter
->count
)
1219 p
= &(*p
)->rb_right
;
1222 rb_link_node(&he
->rb_node
, parent
, p
);
1223 rb_insert_color(&he
->rb_node
, &output_hists
);
1226 static void output__resort(u64 total_samples
)
1228 struct rb_node
*next
;
1229 struct hist_entry
*n
;
1230 struct rb_root
*tree
= &hist
;
1231 u64 min_callchain_hits
;
1233 min_callchain_hits
= total_samples
* (callchain_param
.min_percent
/ 100);
1235 if (sort__need_collapse
)
1236 tree
= &collapse_hists
;
1238 next
= rb_first(tree
);
1241 n
= rb_entry(next
, struct hist_entry
, rb_node
);
1242 next
= rb_next(&n
->rb_node
);
1244 rb_erase(&n
->rb_node
, tree
);
1245 output__insert_entry(n
, min_callchain_hits
);
1249 static size_t output__fprintf(FILE *fp
, u64 total_samples
)
1251 struct hist_entry
*pos
;
1252 struct sort_entry
*se
;
1256 char *col_width
= col_width_list_str
;
1257 int raw_printing_style
;
1259 raw_printing_style
= !strcmp(pretty_printing_style
, "raw");
1263 fprintf(fp
, "# Samples: %Ld\n", (u64
)total_samples
);
1266 fprintf(fp
, "# Overhead");
1267 if (show_nr_samples
) {
1269 fprintf(fp
, "%cSamples", *field_sep
);
1271 fputs(" Samples ", fp
);
1273 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
1277 fprintf(fp
, "%c%s", *field_sep
, se
->header
);
1280 width
= strlen(se
->header
);
1282 if (col_width_list_str
) {
1284 *se
->width
= atoi(col_width
);
1285 col_width
= strchr(col_width
, ',');
1290 width
= *se
->width
= max(*se
->width
, width
);
1292 fprintf(fp
, " %*s", width
, se
->header
);
1299 fprintf(fp
, "# ........");
1300 if (show_nr_samples
)
1301 fprintf(fp
, " ..........");
1302 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
1312 width
= strlen(se
->header
);
1313 for (i
= 0; i
< width
; i
++)
1321 for (nd
= rb_first(&output_hists
); nd
; nd
= rb_next(nd
)) {
1322 pos
= rb_entry(nd
, struct hist_entry
, rb_node
);
1323 ret
+= hist_entry__fprintf(fp
, pos
, total_samples
);
1326 if (sort_order
== default_sort_order
&&
1327 parent_pattern
== default_parent_pattern
) {
1329 fprintf(fp
, "# (For a higher level overview, try: perf report --sort comm,dso)\n");
1334 free(rem_sq_bracket
);
1337 perf_read_values_display(fp
, &show_threads_values
,
1338 raw_printing_style
);
1343 static void register_idle_thread(void)
1345 struct thread
*thread
= threads__findnew(0);
1347 if (thread
== NULL
||
1348 thread__set_comm(thread
, "[idle]")) {
1349 fprintf(stderr
, "problem inserting idle task.\n");
1354 static unsigned long total
= 0,
1361 static int validate_chain(struct ip_callchain
*chain
, event_t
*event
)
1363 unsigned int chain_size
;
1365 chain_size
= event
->header
.size
;
1366 chain_size
-= (unsigned long)&event
->ip
.__more_data
- (unsigned long)event
;
1368 if (chain
->nr
*sizeof(u64
) > chain_size
)
1375 process_sample_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1379 struct dso
*dso
= NULL
;
1380 struct thread
*thread
= threads__findnew(event
->ip
.pid
);
1381 u64 ip
= event
->ip
.ip
;
1383 struct map
*map
= NULL
;
1384 void *more_data
= event
->ip
.__more_data
;
1385 struct ip_callchain
*chain
= NULL
;
1388 if (sample_type
& PERF_SAMPLE_PERIOD
) {
1389 period
= *(u64
*)more_data
;
1390 more_data
+= sizeof(u64
);
1393 dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
1394 (void *)(offset
+ head
),
1395 (void *)(long)(event
->header
.size
),
1401 if (sample_type
& PERF_SAMPLE_CALLCHAIN
) {
1404 chain
= (void *)more_data
;
1406 dprintf("... chain: nr:%Lu\n", chain
->nr
);
1408 if (validate_chain(chain
, event
) < 0) {
1409 eprintf("call-chain problem with event, skipping it.\n");
1414 for (i
= 0; i
< chain
->nr
; i
++)
1415 dprintf("..... %2d: %016Lx\n", i
, chain
->ips
[i
]);
1419 dprintf(" ... thread: %s:%d\n", thread
->comm
, thread
->pid
);
1421 if (thread
== NULL
) {
1422 eprintf("problem processing %d event, skipping it.\n",
1423 event
->header
.type
);
1427 if (comm_list
&& !strlist__has_entry(comm_list
, thread
->comm
))
1430 cpumode
= event
->header
.misc
& PERF_EVENT_MISC_CPUMODE_MASK
;
1432 if (cpumode
== PERF_EVENT_MISC_KERNEL
) {
1438 dprintf(" ...... dso: %s\n", dso
->name
);
1440 } else if (cpumode
== PERF_EVENT_MISC_USER
) {
1449 dso
= hypervisor_dso
;
1451 dprintf(" ...... dso: [hypervisor]\n");
1454 if (show
& show_mask
) {
1455 struct symbol
*sym
= resolve_symbol(thread
, &map
, &dso
, &ip
);
1457 if (dso_list
&& dso
&& dso
->name
&& !strlist__has_entry(dso_list
, dso
->name
))
1460 if (sym_list
&& sym
&& !strlist__has_entry(sym_list
, sym
->name
))
1463 if (hist_entry__add(thread
, map
, dso
, sym
, ip
, chain
, level
, period
)) {
1464 eprintf("problem incrementing symbol count, skipping event\n");
1474 process_mmap_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1476 struct thread
*thread
= threads__findnew(event
->mmap
.pid
);
1477 struct map
*map
= map__new(&event
->mmap
);
1479 dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
1480 (void *)(offset
+ head
),
1481 (void *)(long)(event
->header
.size
),
1483 (void *)(long)event
->mmap
.start
,
1484 (void *)(long)event
->mmap
.len
,
1485 (void *)(long)event
->mmap
.pgoff
,
1486 event
->mmap
.filename
);
1488 if (thread
== NULL
|| map
== NULL
) {
1489 dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
1493 thread__insert_map(thread
, map
);
1500 process_comm_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1502 struct thread
*thread
= threads__findnew(event
->comm
.pid
);
1504 dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
1505 (void *)(offset
+ head
),
1506 (void *)(long)(event
->header
.size
),
1507 event
->comm
.comm
, event
->comm
.pid
);
1509 if (thread
== NULL
||
1510 thread__set_comm(thread
, event
->comm
.comm
)) {
1511 dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
1520 process_task_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1522 struct thread
*thread
= threads__findnew(event
->fork
.pid
);
1523 struct thread
*parent
= threads__findnew(event
->fork
.ppid
);
1525 dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
1526 (void *)(offset
+ head
),
1527 (void *)(long)(event
->header
.size
),
1528 event
->header
.type
== PERF_EVENT_FORK
? "FORK" : "EXIT",
1529 event
->fork
.pid
, event
->fork
.tid
,
1530 event
->fork
.ppid
, event
->fork
.ptid
);
1533 * A thread clone will have the same PID for both
1536 if (thread
== parent
)
1539 if (event
->header
.type
== PERF_EVENT_EXIT
)
1542 if (!thread
|| !parent
|| thread__fork(thread
, parent
)) {
1543 dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
1552 process_lost_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1554 dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
1555 (void *)(offset
+ head
),
1556 (void *)(long)(event
->header
.size
),
1560 total_lost
+= event
->lost
.lost
;
1565 static void trace_event(event_t
*event
)
1567 unsigned char *raw_event
= (void *)event
;
1568 char *color
= PERF_COLOR_BLUE
;
1575 cdprintf("\n. ... raw event: size %d bytes\n", event
->header
.size
);
1577 for (i
= 0; i
< event
->header
.size
; i
++) {
1578 if ((i
& 15) == 0) {
1580 cdprintf(" %04x: ", i
);
1583 cdprintf(" %02x", raw_event
[i
]);
1585 if (((i
& 15) == 15) || i
== event
->header
.size
-1) {
1587 for (j
= 0; j
< 15-(i
& 15); j
++)
1589 for (j
= 0; j
< (i
& 15); j
++) {
1590 if (isprint(raw_event
[i
-15+j
]))
1591 cdprintf("%c", raw_event
[i
-15+j
]);
1601 static struct perf_header
*header
;
1603 static struct perf_counter_attr
*perf_header__find_attr(u64 id
)
1607 for (i
= 0; i
< header
->attrs
; i
++) {
1608 struct perf_header_attr
*attr
= header
->attr
[i
];
1611 for (j
= 0; j
< attr
->ids
; j
++) {
1612 if (attr
->id
[j
] == id
)
1621 process_read_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1623 struct perf_counter_attr
*attr
= perf_header__find_attr(event
->read
.id
);
1626 char *name
= attr
? __event_name(attr
->type
, attr
->config
)
1628 perf_read_values_add_value(&show_threads_values
,
1629 event
->read
.pid
, event
->read
.tid
,
1635 dprintf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
1636 (void *)(offset
+ head
),
1637 (void *)(long)(event
->header
.size
),
1640 attr
? __event_name(attr
->type
, attr
->config
)
1648 process_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1652 switch (event
->header
.type
) {
1653 case PERF_EVENT_SAMPLE
:
1654 return process_sample_event(event
, offset
, head
);
1656 case PERF_EVENT_MMAP
:
1657 return process_mmap_event(event
, offset
, head
);
1659 case PERF_EVENT_COMM
:
1660 return process_comm_event(event
, offset
, head
);
1662 case PERF_EVENT_FORK
:
1663 case PERF_EVENT_EXIT
:
1664 return process_task_event(event
, offset
, head
);
1666 case PERF_EVENT_LOST
:
1667 return process_lost_event(event
, offset
, head
);
1669 case PERF_EVENT_READ
:
1670 return process_read_event(event
, offset
, head
);
1673 * We dont process them right now but they are fine:
1676 case PERF_EVENT_THROTTLE
:
1677 case PERF_EVENT_UNTHROTTLE
:
1687 static u64
perf_header__sample_type(void)
1689 u64 sample_type
= 0;
1692 for (i
= 0; i
< header
->attrs
; i
++) {
1693 struct perf_header_attr
*attr
= header
->attr
[i
];
1696 sample_type
= attr
->attr
.sample_type
;
1697 else if (sample_type
!= attr
->attr
.sample_type
)
1698 die("non matching sample_type");
/*
 * __cmd_report - main work function of 'perf report': open and mmap
 * the perf.data input, walk every event record through
 * process_event(), then resort and print the histogram to stdout.
 *
 * Returns an exit status (EXIT_FAILURE on early errors; the success
 * path is not fully visible in this extract).
 *
 * NOTE(review): many original lines are missing from this extraction
 * (braces, gotos/returns on the error paths, the mmap window re-slide
 * around lines 1801-1814, and the loop controls), so the text below is
 * not compilable as-is; comments describe only what the visible calls
 * establish.
 */
1704 static int __cmd_report(void)
1706 int ret
, rc
= EXIT_FAILURE
;
1707 unsigned long offset
= 0;
1708 unsigned long head
, shift
;
1714 register_idle_thread();
/* per-thread counter storage for -T/--threads output */
1717 perf_read_values_init(&show_threads_values
);
/* open the input file (default "perf.data", overridable via -i) */
1719 input
= open(input_name
, O_RDONLY
);
1721 fprintf(stderr
, " failed to open file: %s", input_name
);
/* only suggest 'perf record' when the user kept the default name */
1722 if (!strcmp(input_name
, "perf.data"))
1723 fprintf(stderr
, " (try 'perf record' first)");
1724 fprintf(stderr
, "\n");
1728 ret
= fstat(input
, &stat
);
1730 perror("failed to stat file");
1734 if (!stat
.st_size
) {
1735 fprintf(stderr
, "zero-sized file, nothing to do!\n");
/* parse the file header; data_offset marks where event records start */
1739 header
= perf_header__read(input
);
1740 head
= header
->data_offset
;
1742 sample_type
= perf_header__sample_type();
/* --sort parent and -c/--call-graph both need PERF_SAMPLE_CALLCHAIN
 * in the recorded data; bail out with a hint otherwise */
1744 if (!(sample_type
& PERF_SAMPLE_CALLCHAIN
)) {
1745 if (sort__has_parent
) {
1746 fprintf(stderr
, "selected --sort parent, but no"
1747 " callchain data. Did you call"
1748 " perf record without -g?\n");
1752 fprintf(stderr
, "selected -c but no callchain data."
1753 " Did you call perf record without"
1757 } else if (callchain_param
.mode
!= CHAIN_NONE
&& !callchain
) {
1759 if (register_callchain_param(&callchain_param
) < 0) {
1760 fprintf(stderr
, "Can't register callchain"
1766 if (load_kernel() < 0) {
1767 perror("failed to load kernel symbols");
1768 return EXIT_FAILURE
;
/* remember the cwd so DSO paths can be shortened (unless -P) */
1772 if (getcwd(__cwd
, sizeof(__cwd
)) == NULL
) {
1773 perror("failed to get the current directory");
1774 return EXIT_FAILURE
;
1776 cwdlen
= strlen(cwd
);
/* align the mmap offset down to a page boundary */
1782 shift
= page_size
* (head
/ page_size
);
1787 buf
= (char *)mmap(NULL
, page_size
* mmap_window
, PROT_READ
,
1788 MAP_SHARED
, input
, offset
);
1789 if (buf
== MAP_FAILED
) {
1790 perror("failed to mmap file");
/* event parsing loop: 'head' is the cursor within the current window */
1795 event
= (event_t
*)(buf
+ head
);
1797 size
= event
->header
.size
;
/* if the next record would cross the window end, slide the mmap
 * window forward (unmap + remap at the new page-aligned offset) */
1801 if (head
+ event
->header
.size
>= page_size
* mmap_window
) {
1804 shift
= page_size
* (head
/ page_size
);
1806 ret
= munmap(buf
, page_size
* mmap_window
);
1814 size
= event
->header
.size
;
1816 dprintf("\n%p [%p]: event: %d\n",
1817 (void *)(offset
+ head
),
1818 (void *)(long)event
->header
.size
,
1819 event
->header
.type
);
/* zero-sized or unprocessable records are skipped, not fatal */
1821 if (!size
|| process_event(event
, offset
, head
) < 0) {
1823 dprintf("%p [%p]: skipping unknown header type: %d\n",
1824 (void *)(offset
+ head
),
1825 (void *)(long)(event
->header
.size
),
1826 event
->header
.type
);
1831 * assume we lost track of the stream, check alignment, and
1832 * increment a single u64 in the hope to catch on again 'soon'.
1835 if (unlikely(head
& 7))
/* stop once the cursor passes the end of the data section */
1843 if (offset
+ head
>= header
->data_offset
+ header
->data_size
)
1846 if (offset
+ head
< (unsigned long)stat
.st_size
)
/* -D/--dump-raw-trace statistics summary */
1853 dprintf(" IP events: %10ld\n", total
);
1854 dprintf(" mmap events: %10ld\n", total_mmap
);
1855 dprintf(" comm events: %10ld\n", total_comm
);
1856 dprintf(" fork events: %10ld\n", total_fork
);
1857 dprintf(" lost events: %10ld\n", total_lost
);
1858 dprintf(" unknown events: %10ld\n", total_unknown
);
1864 threads__fprintf(stdout
);
1867 dsos__fprintf(stdout
);
/* resort accumulated hist entries by sample count, then print */
1870 output__resort(total
);
1871 output__fprintf(stdout
, total
);
1874 perf_read_values_destroy(&show_threads_values
);
/*
 * parse_callchain_opt - option callback for -g/--call-graph.
 *
 * Parses an "output_type,min_percent" argument (default
 * "fractal,0.5"): the first token selects the callchain display mode
 * (graph/flat/fractal/none), the optional second token is parsed with
 * strtod() as the minimum percentage threshold.  The resulting
 * callchain_param is then registered.
 *
 * NOTE(review): the strncmp() calls compare each keyword using
 * strlen(arg) — the length of the WHOLE argument string, not of the
 * token or the keyword — so e.g. "graph,0.5" only matches because
 * strncmp stops at the keyword's NUL; confirm intent against the full
 * source before changing.  The return-type line and several lines
 * (tok NULL checks, endptr validation, closing braces) are missing
 * from this extract.
 */
1880 parse_callchain_opt(const struct option
*opt __used
, const char *arg
,
/* arg is const in the signature but strtok() mutates it in place */
1891 tok
= strtok((char *)arg
, ",");
1895 /* get the output mode */
1896 if (!strncmp(tok
, "graph", strlen(arg
)))
1897 callchain_param
.mode
= CHAIN_GRAPH_ABS
;
1899 else if (!strncmp(tok
, "flat", strlen(arg
)))
1900 callchain_param
.mode
= CHAIN_FLAT
;
1902 else if (!strncmp(tok
, "fractal", strlen(arg
)))
1903 callchain_param
.mode
= CHAIN_GRAPH_REL
;
1905 else if (!strncmp(tok
, "none", strlen(arg
))) {
1906 callchain_param
.mode
= CHAIN_NONE
;
1915 /* get the min percentage */
1916 tok
= strtok(NULL
, ",");
1920 callchain_param
.min_percent
= strtod(tok
, &endptr
);
1925 if (register_callchain_param(&callchain_param
) < 0) {
1926 fprintf(stderr
, "Can't register callchain params\n");
/*
 * Usage string array passed to parse_options()/usage_with_options().
 * NOTE(review): the terminating NULL entry and closing brace are not
 * visible in this extract.
 */
1932 static const char * const report_usage
[] = {
1933 "perf report [<options>] <command>",
/*
 * Command-line option table for 'perf report', consumed by
 * parse_options().  Each entry binds a short/long option to one of the
 * file-scope variables declared at the top of this file (input_name,
 * sort_order, the dso/comm/symbol filter strings, field_sep, ...).
 * NOTE(review): the OPT_END() terminator and closing brace are not
 * visible in this extract.
 */
1937 static const struct option options
[] = {
1938 OPT_STRING('i', "input", &input_name
, "file",
1940 OPT_BOOLEAN('v', "verbose", &verbose
,
1941 "be more verbose (show symbol address, etc)"),
1942 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace
,
1943 "dump raw trace in ASCII"),
1944 OPT_STRING('k', "vmlinux", &vmlinux
, "file", "vmlinux pathname"),
1945 OPT_BOOLEAN('m', "modules", &modules
,
1946 "load module symbols - WARNING: use only with -k and LIVE kernel"),
1947 OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples
,
1948 "Show a column with the number of samples"),
1949 OPT_BOOLEAN('T', "threads", &show_threads
,
1950 "Show per-thread event counters"),
1951 OPT_STRING(0, "pretty", &pretty_printing_style
, "key",
1952 "pretty printing style key: normal raw"),
1953 OPT_STRING('s', "sort", &sort_order
, "key[,key2...]",
1954 "sort by key(s): pid, comm, dso, symbol, parent"),
1955 OPT_BOOLEAN('P', "full-paths", &full_paths
,
1956 "Don't shorten the pathnames taking into account the cwd"),
1957 OPT_STRING('p', "parent", &parent_pattern
, "regex",
1958 "regex filter to identify parent, see: '--sort parent'"),
1959 OPT_BOOLEAN('x', "exclude-other", &exclude_other
,
1960 "Only display entries with parent-match"),
/* -g takes "output_type,min_percent" via the parse_callchain_opt callback */
1961 OPT_CALLBACK_DEFAULT('g', "call-graph", NULL
, "output_type,min_percent",
1962 "Display callchains using output_type and min percent threshold. "
1963 "Default: fractal,0.5", &parse_callchain_opt
, callchain_default_opt
),
1964 OPT_STRING('d', "dsos", &dso_list_str
, "dso[,dso...]",
1965 "only consider symbols in these dsos"),
1966 OPT_STRING('C', "comms", &comm_list_str
, "comm[,comm...]",
1967 "only consider symbols in these comms"),
1968 OPT_STRING('S', "symbols", &sym_list_str
, "symbol[,symbol...]",
1969 "only consider these symbols"),
1970 OPT_STRING('w', "column-widths", &col_width_list_str
,
1972 "don't try to adjust column width, use these fixed values"),
1973 OPT_STRING('t', "field-separator", &field_sep
, "separator",
1974 "separator for columns, no spaces will be added between "
1975 "columns '.' is reserved."),
/*
 * setup_sorting - split the --sort argument on commas/spaces and
 * register each key via sort_dimension__add(); an unknown key prints
 * an error followed by the usage text.
 *
 * NOTE(review): no free(str) for the strdup() result is visible in
 * this extract — the function's tail is missing, so confirm against
 * the full source whether the duplicate is freed or intentionally
 * kept (sort keys may retain pointers into it).
 */
1979 static void setup_sorting(void)
1981 char *tmp
, *tok
, *str
= strdup(sort_order
);
/* strtok_r keeps this reentrant; ", " accepts comma or space separators */
1983 for (tok
= strtok_r(str
, ", ", &tmp
);
1984 tok
; tok
= strtok_r(NULL
, ", ", &tmp
)) {
1985 if (sort_dimension__add(tok
) < 0) {
1986 error("Unknown --sort key: `%s'", tok
);
1987 usage_with_options(report_usage
, options
);
/*
 * setup_list - build a strlist filter from a -d/-C/-S option string.
 *
 * Creates *list from the comma-separated list_str; on a parse problem
 * an error naming the list is printed.  When the resulting list holds
 * exactly one entry, a "# name: value" header line is emitted to fp.
 *
 * NOTE(review): trailing parameters of the signature (after
 * list_name), the NULL/error-path lines, and the sort_entry 'se'
 * usage (presumably setting its elide flag for single-entry lists)
 * are not visible in this extract — confirm against the full source.
 */
1994 static void setup_list(struct strlist
**list
, const char *list_str
,
1995 struct sort_entry
*se
, const char *list_name
,
1999 *list
= strlist__new(true, list_str
);
2001 fprintf(stderr
, "problems parsing %s list\n",
/* a single-entry filter is echoed as a comment header in the output */
2005 if (strlist__nr_entries(*list
) == 1) {
2006 fprintf(fp
, "# %s: %s\n", list_name
,
2007 strlist__entry(*list
, 0)->s
);
/*
 * cmd_report - entry point for the 'perf report' subcommand.
 *
 * Parses command-line options, wires up the sort keys and the
 * dso/comm/symbol filter lists, validates --field-separator, and then
 * runs __cmd_report() on the input file.
 *
 * NOTE(review): lines are missing from this extract (the
 * setup_sorting() call site, the exclude_other handling, error-path
 * braces, and the closing brace after the final return), so the text
 * below is not compilable as-is.
 */
2013 int cmd_report(int argc
, const char **argv
, const char *prefix __used
)
2017 page_size
= getpagesize();
2019 argc
= parse_options(argc
, argv
, options
, report_usage
, 0);
/* an explicit -p/--parent implies sorting by parent, with the
 * parent column elided since it would repeat the pattern */
2023 if (parent_pattern
!= default_parent_pattern
) {
2024 sort_dimension__add("parent");
2025 sort_parent
.elide
= 1;
2030 * Any (unrecognized) arguments left?
2033 usage_with_options(report_usage
, options
);
2037 setup_list(&dso_list
, dso_list_str
, &sort_dso
, "dso", stdout
);
2038 setup_list(&comm_list
, comm_list_str
, &sort_comm
, "comm", stdout
);
2039 setup_list(&sym_list
, sym_list_str
, &sort_sym
, "symbol", stdout
);
/* '.' is reserved: repsep output uses it internally (see repsep_fprintf) */
2041 if (field_sep
&& *field_sep
== '.') {
2042 fputs("'.' is the only non valid --field-separator argument\n",
2047 return __cmd_report();