4 * Builtin report command: Analyze the perf.data input file,
5 * look up and read DSOs and symbol information and display
6 * a histogram of results, along various sorting keys.
10 #include "util/util.h"
12 #include "util/color.h"
13 #include <linux/list.h>
14 #include "util/cache.h"
15 #include <linux/rbtree.h>
16 #include "util/symbol.h"
17 #include "util/string.h"
18 #include "util/callchain.h"
19 #include "util/strlist.h"
20 #include "util/values.h"
23 #include "util/header.h"
25 #include "util/parse-options.h"
26 #include "util/parse-events.h"
32 static char const *input_name
= "perf.data";
33 static char *vmlinux
= NULL
;
35 static char default_sort_order
[] = "comm,dso,symbol";
36 static char *sort_order
= default_sort_order
;
37 static char *dso_list_str
, *comm_list_str
, *sym_list_str
,
39 static struct strlist
*dso_list
, *comm_list
, *sym_list
;
40 static char *field_sep
;
43 static int show_mask
= SHOW_KERNEL
| SHOW_USER
| SHOW_HV
;
45 static int dump_trace
= 0;
46 #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
47 #define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
50 #define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
54 static int full_paths
;
55 static int show_nr_samples
;
57 static int show_threads
;
58 static struct perf_read_values show_threads_values
;
60 static char default_pretty_printing_style
[] = "normal";
61 static char *pretty_printing_style
= default_pretty_printing_style
;
63 static unsigned long page_size
;
64 static unsigned long mmap_window
= 32;
66 static char default_parent_pattern
[] = "^sys_|^do_page_fault";
67 static char *parent_pattern
= default_parent_pattern
;
68 static regex_t parent_regex
;
70 static int exclude_other
= 1;
72 static char callchain_default_opt
[] = "fractal,0.5";
77 struct callchain_param callchain_param
= {
78 .mode
= CHAIN_GRAPH_REL
,
82 static u64 sample_type
;
85 struct perf_event_header header
;
88 unsigned char __more_data
[];
92 struct perf_event_header header
;
97 char filename
[PATH_MAX
];
101 struct perf_event_header header
;
107 struct perf_event_header header
;
113 struct perf_event_header header
;
119 struct perf_event_header header
;
127 typedef union event_union
{
128 struct perf_event_header header
;
130 struct mmap_event mmap
;
131 struct comm_event comm
;
132 struct fork_event fork
;
133 struct lost_event lost
;
134 struct read_event read
;
137 static int repsep_fprintf(FILE *fp
, const char *fmt
, ...)
144 n
= vfprintf(fp
, fmt
, ap
);
147 n
= vasprintf(&bf
, fmt
, ap
);
151 sep
= strchr(sep
, *field_sep
);
164 static LIST_HEAD(dsos
);
165 static struct dso
*kernel_dso
;
166 static struct dso
*vdso
;
167 static struct dso
*hypervisor_dso
;
169 static void dsos__add(struct dso
*dso
)
171 list_add_tail(&dso
->node
, &dsos
);
174 static struct dso
*dsos__find(const char *name
)
178 list_for_each_entry(pos
, &dsos
, node
)
179 if (strcmp(pos
->name
, name
) == 0)
184 static struct dso
*dsos__findnew(const char *name
)
186 struct dso
*dso
= dsos__find(name
);
192 dso
= dso__new(name
, 0);
196 nr
= dso__load(dso
, NULL
, verbose
);
198 eprintf("Failed to open: %s\n", name
);
202 eprintf("No symbols found in: %s, maybe install a debug package?\n", name
);
213 static void dsos__fprintf(FILE *fp
)
217 list_for_each_entry(pos
, &dsos
, node
)
218 dso__fprintf(pos
, fp
);
221 static struct symbol
*vdso__find_symbol(struct dso
*dso
, u64 ip
)
223 return dso__find_symbol(dso
, ip
);
226 static int load_kernel(void)
230 kernel_dso
= dso__new("[kernel]", 0);
234 err
= dso__load_kernel(kernel_dso
, vmlinux
, NULL
, verbose
, modules
);
236 dso__delete(kernel_dso
);
239 dsos__add(kernel_dso
);
241 vdso
= dso__new("[vdso]", 0);
245 vdso
->find_symbol
= vdso__find_symbol
;
249 hypervisor_dso
= dso__new("[hypervisor]", 0);
252 dsos__add(hypervisor_dso
);
257 static char __cwd
[PATH_MAX
];
258 static char *cwd
= __cwd
;
261 static int strcommon(const char *pathname
)
265 while (n
< cwdlen
&& pathname
[n
] == cwd
[n
])
272 struct list_head node
;
276 u64 (*map_ip
)(struct map
*, u64
);
280 static u64
map__map_ip(struct map
*map
, u64 ip
)
282 return ip
- map
->start
+ map
->pgoff
;
285 static u64
vdso__map_ip(struct map
*map __used
, u64 ip
)
290 static inline int is_anon_memory(const char *filename
)
292 return strcmp(filename
, "//anon") == 0;
295 static struct map
*map__new(struct mmap_event
*event
)
297 struct map
*self
= malloc(sizeof(*self
));
300 const char *filename
= event
->filename
;
301 char newfilename
[PATH_MAX
];
305 int n
= strcommon(filename
);
308 snprintf(newfilename
, sizeof(newfilename
),
309 ".%s", filename
+ n
);
310 filename
= newfilename
;
314 anon
= is_anon_memory(filename
);
317 snprintf(newfilename
, sizeof(newfilename
), "/tmp/perf-%d.map", event
->pid
);
318 filename
= newfilename
;
321 self
->start
= event
->start
;
322 self
->end
= event
->start
+ event
->len
;
323 self
->pgoff
= event
->pgoff
;
325 self
->dso
= dsos__findnew(filename
);
326 if (self
->dso
== NULL
)
329 if (self
->dso
== vdso
|| anon
)
330 self
->map_ip
= vdso__map_ip
;
332 self
->map_ip
= map__map_ip
;
340 static struct map
*map__clone(struct map
*self
)
342 struct map
*map
= malloc(sizeof(*self
));
347 memcpy(map
, self
, sizeof(*self
));
352 static int map__overlap(struct map
*l
, struct map
*r
)
354 if (l
->start
> r
->start
) {
360 if (l
->end
> r
->start
)
366 static size_t map__fprintf(struct map
*self
, FILE *fp
)
368 return fprintf(fp
, " %Lx-%Lx %Lx %s\n",
369 self
->start
, self
->end
, self
->pgoff
, self
->dso
->name
);
374 struct rb_node rb_node
;
375 struct list_head maps
;
380 static struct thread
*thread__new(pid_t pid
)
382 struct thread
*self
= malloc(sizeof(*self
));
386 self
->comm
= malloc(32);
388 snprintf(self
->comm
, 32, ":%d", self
->pid
);
389 INIT_LIST_HEAD(&self
->maps
);
395 static unsigned int dsos__col_width
,
399 static int thread__set_comm(struct thread
*self
, const char *comm
)
403 self
->comm
= strdup(comm
);
407 if (!col_width_list_str
&& !field_sep
&&
408 (!comm_list
|| strlist__has_entry(comm_list
, comm
))) {
409 unsigned int slen
= strlen(comm
);
410 if (slen
> comms__col_width
) {
411 comms__col_width
= slen
;
412 threads__col_width
= slen
+ 6;
419 static size_t thread__fprintf(struct thread
*self
, FILE *fp
)
422 size_t ret
= fprintf(fp
, "Thread %d %s\n", self
->pid
, self
->comm
);
424 list_for_each_entry(pos
, &self
->maps
, node
)
425 ret
+= map__fprintf(pos
, fp
);
431 static struct rb_root threads
;
432 static struct thread
*last_match
;
434 static struct thread
*threads__findnew(pid_t pid
)
436 struct rb_node
**p
= &threads
.rb_node
;
437 struct rb_node
*parent
= NULL
;
441 * Front-end cache - PID lookups come in blocks,
442 * so most of the time we don't have to look up
445 if (last_match
&& last_match
->pid
== pid
)
450 th
= rb_entry(parent
, struct thread
, rb_node
);
452 if (th
->pid
== pid
) {
463 th
= thread__new(pid
);
465 rb_link_node(&th
->rb_node
, parent
, p
);
466 rb_insert_color(&th
->rb_node
, &threads
);
473 static void thread__insert_map(struct thread
*self
, struct map
*map
)
475 struct map
*pos
, *tmp
;
477 list_for_each_entry_safe(pos
, tmp
, &self
->maps
, node
) {
478 if (map__overlap(pos
, map
)) {
480 printf("overlapping maps:\n");
481 map__fprintf(map
, stdout
);
482 map__fprintf(pos
, stdout
);
485 if (map
->start
<= pos
->start
&& map
->end
> pos
->start
)
486 pos
->start
= map
->end
;
488 if (map
->end
>= pos
->end
&& map
->start
< pos
->end
)
489 pos
->end
= map
->start
;
492 printf("after collision:\n");
493 map__fprintf(pos
, stdout
);
496 if (pos
->start
>= pos
->end
) {
497 list_del_init(&pos
->node
);
503 list_add_tail(&map
->node
, &self
->maps
);
506 static int thread__fork(struct thread
*self
, struct thread
*parent
)
512 self
->comm
= strdup(parent
->comm
);
516 list_for_each_entry(map
, &parent
->maps
, node
) {
517 struct map
*new = map__clone(map
);
520 thread__insert_map(self
, new);
526 static struct map
*thread__find_map(struct thread
*self
, u64 ip
)
533 list_for_each_entry(pos
, &self
->maps
, node
)
534 if (ip
>= pos
->start
&& ip
<= pos
->end
)
540 static size_t threads__fprintf(FILE *fp
)
545 for (nd
= rb_first(&threads
); nd
; nd
= rb_next(nd
)) {
546 struct thread
*pos
= rb_entry(nd
, struct thread
, rb_node
);
548 ret
+= thread__fprintf(pos
, fp
);
555 * histogram, sorted on item, collects counts
558 static struct rb_root hist
;
561 struct rb_node rb_node
;
563 struct thread
*thread
;
567 struct symbol
*parent
;
570 struct callchain_node callchain
;
571 struct rb_root sorted_chain
;
577 * configurable sorting bits
581 struct list_head list
;
585 int64_t (*cmp
)(struct hist_entry
*, struct hist_entry
*);
586 int64_t (*collapse
)(struct hist_entry
*, struct hist_entry
*);
587 size_t (*print
)(FILE *fp
, struct hist_entry
*, unsigned int width
);
592 static int64_t cmp_null(void *l
, void *r
)
605 sort__thread_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
607 return right
->thread
->pid
- left
->thread
->pid
;
611 sort__thread_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
613 return repsep_fprintf(fp
, "%*s:%5d", width
- 6,
614 self
->thread
->comm
?: "", self
->thread
->pid
);
617 static struct sort_entry sort_thread
= {
618 .header
= "Command: Pid",
619 .cmp
= sort__thread_cmp
,
620 .print
= sort__thread_print
,
621 .width
= &threads__col_width
,
627 sort__comm_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
629 return right
->thread
->pid
- left
->thread
->pid
;
633 sort__comm_collapse(struct hist_entry
*left
, struct hist_entry
*right
)
635 char *comm_l
= left
->thread
->comm
;
636 char *comm_r
= right
->thread
->comm
;
638 if (!comm_l
|| !comm_r
)
639 return cmp_null(comm_l
, comm_r
);
641 return strcmp(comm_l
, comm_r
);
645 sort__comm_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
647 return repsep_fprintf(fp
, "%*s", width
, self
->thread
->comm
);
650 static struct sort_entry sort_comm
= {
652 .cmp
= sort__comm_cmp
,
653 .collapse
= sort__comm_collapse
,
654 .print
= sort__comm_print
,
655 .width
= &comms__col_width
,
661 sort__dso_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
663 struct dso
*dso_l
= left
->dso
;
664 struct dso
*dso_r
= right
->dso
;
666 if (!dso_l
|| !dso_r
)
667 return cmp_null(dso_l
, dso_r
);
669 return strcmp(dso_l
->name
, dso_r
->name
);
673 sort__dso_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
676 return repsep_fprintf(fp
, "%-*s", width
, self
->dso
->name
);
678 return repsep_fprintf(fp
, "%*llx", width
, (u64
)self
->ip
);
681 static struct sort_entry sort_dso
= {
682 .header
= "Shared Object",
683 .cmp
= sort__dso_cmp
,
684 .print
= sort__dso_print
,
685 .width
= &dsos__col_width
,
691 sort__sym_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
695 if (left
->sym
== right
->sym
)
698 ip_l
= left
->sym
? left
->sym
->start
: left
->ip
;
699 ip_r
= right
->sym
? right
->sym
->start
: right
->ip
;
701 return (int64_t)(ip_r
- ip_l
);
705 sort__sym_print(FILE *fp
, struct hist_entry
*self
, unsigned int width __used
)
710 ret
+= repsep_fprintf(fp
, "%#018llx %c ", (u64
)self
->ip
,
711 dso__symtab_origin(self
->dso
));
713 ret
+= repsep_fprintf(fp
, "[%c] ", self
->level
);
715 ret
+= repsep_fprintf(fp
, "%s", self
->sym
->name
);
717 if (self
->sym
->module
)
718 ret
+= repsep_fprintf(fp
, "\t[%s]",
719 self
->sym
->module
->name
);
721 ret
+= repsep_fprintf(fp
, "%#016llx", (u64
)self
->ip
);
727 static struct sort_entry sort_sym
= {
729 .cmp
= sort__sym_cmp
,
730 .print
= sort__sym_print
,
736 sort__parent_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
738 struct symbol
*sym_l
= left
->parent
;
739 struct symbol
*sym_r
= right
->parent
;
741 if (!sym_l
|| !sym_r
)
742 return cmp_null(sym_l
, sym_r
);
744 return strcmp(sym_l
->name
, sym_r
->name
);
748 sort__parent_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
750 return repsep_fprintf(fp
, "%-*s", width
,
751 self
->parent
? self
->parent
->name
: "[other]");
754 static unsigned int parent_symbol__col_width
;
756 static struct sort_entry sort_parent
= {
757 .header
= "Parent symbol",
758 .cmp
= sort__parent_cmp
,
759 .print
= sort__parent_print
,
760 .width
= &parent_symbol__col_width
,
763 static int sort__need_collapse
= 0;
764 static int sort__has_parent
= 0;
766 struct sort_dimension
{
768 struct sort_entry
*entry
;
772 static struct sort_dimension sort_dimensions
[] = {
773 { .name
= "pid", .entry
= &sort_thread
, },
774 { .name
= "comm", .entry
= &sort_comm
, },
775 { .name
= "dso", .entry
= &sort_dso
, },
776 { .name
= "symbol", .entry
= &sort_sym
, },
777 { .name
= "parent", .entry
= &sort_parent
, },
780 static LIST_HEAD(hist_entry__sort_list
);
782 static int sort_dimension__add(char *tok
)
786 for (i
= 0; i
< ARRAY_SIZE(sort_dimensions
); i
++) {
787 struct sort_dimension
*sd
= &sort_dimensions
[i
];
792 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
795 if (sd
->entry
->collapse
)
796 sort__need_collapse
= 1;
798 if (sd
->entry
== &sort_parent
) {
799 int ret
= regcomp(&parent_regex
, parent_pattern
, REG_EXTENDED
);
803 regerror(ret
, &parent_regex
, err
, sizeof(err
));
804 fprintf(stderr
, "Invalid regex: %s\n%s",
805 parent_pattern
, err
);
808 sort__has_parent
= 1;
811 list_add_tail(&sd
->entry
->list
, &hist_entry__sort_list
);
821 hist_entry__cmp(struct hist_entry
*left
, struct hist_entry
*right
)
823 struct sort_entry
*se
;
826 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
827 cmp
= se
->cmp(left
, right
);
836 hist_entry__collapse(struct hist_entry
*left
, struct hist_entry
*right
)
838 struct sort_entry
*se
;
841 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
842 int64_t (*f
)(struct hist_entry
*, struct hist_entry
*);
844 f
= se
->collapse
?: se
->cmp
;
846 cmp
= f(left
, right
);
854 static size_t ipchain__fprintf_graph_line(FILE *fp
, int depth
, int depth_mask
)
859 ret
+= fprintf(fp
, "%s", " ");
861 for (i
= 0; i
< depth
; i
++)
862 if (depth_mask
& (1 << i
))
863 ret
+= fprintf(fp
, "| ");
865 ret
+= fprintf(fp
, " ");
867 ret
+= fprintf(fp
, "\n");
872 ipchain__fprintf_graph(FILE *fp
, struct callchain_list
*chain
, int depth
,
873 int depth_mask
, int count
, u64 total_samples
,
879 ret
+= fprintf(fp
, "%s", " ");
880 for (i
= 0; i
< depth
; i
++) {
881 if (depth_mask
& (1 << i
))
882 ret
+= fprintf(fp
, "|");
884 ret
+= fprintf(fp
, " ");
885 if (!count
&& i
== depth
- 1) {
888 percent
= hits
* 100.0 / total_samples
;
889 ret
+= percent_color_fprintf(fp
, "--%2.2f%%-- ", percent
);
891 ret
+= fprintf(fp
, "%s", " ");
894 ret
+= fprintf(fp
, "%s\n", chain
->sym
->name
);
896 ret
+= fprintf(fp
, "%p\n", (void *)(long)chain
->ip
);
901 static struct symbol
*rem_sq_bracket
;
902 static struct callchain_list rem_hits
;
904 static void init_rem_hits(void)
906 rem_sq_bracket
= malloc(sizeof(*rem_sq_bracket
) + 6);
907 if (!rem_sq_bracket
) {
908 fprintf(stderr
, "Not enough memory to display remaining hits\n");
912 strcpy(rem_sq_bracket
->name
, "[...]");
913 rem_hits
.sym
= rem_sq_bracket
;
917 callchain__fprintf_graph(FILE *fp
, struct callchain_node
*self
,
918 u64 total_samples
, int depth
, int depth_mask
)
920 struct rb_node
*node
, *next
;
921 struct callchain_node
*child
;
922 struct callchain_list
*chain
;
923 int new_depth_mask
= depth_mask
;
929 if (callchain_param
.mode
== CHAIN_GRAPH_REL
)
930 new_total
= self
->children_hit
;
932 new_total
= total_samples
;
934 remaining
= new_total
;
936 node
= rb_first(&self
->rb_root
);
940 child
= rb_entry(node
, struct callchain_node
, rb_node
);
941 cumul
= cumul_hits(child
);
945 * The depth mask manages the output of pipes that show
946 * the depth. We don't want to keep the pipes of the current
947 * level for the last child of this depth.
948 * Except if we have remaining filtered hits. They will
949 * supersede the last child
951 next
= rb_next(node
);
952 if (!next
&& (callchain_param
.mode
!= CHAIN_GRAPH_REL
|| !remaining
))
953 new_depth_mask
&= ~(1 << (depth
- 1));
956 * But we keep the older depth mask for the line separator
957 * to keep the level link until we reach the last child
959 ret
+= ipchain__fprintf_graph_line(fp
, depth
, depth_mask
);
961 list_for_each_entry(chain
, &child
->val
, list
) {
962 if (chain
->ip
>= PERF_CONTEXT_MAX
)
964 ret
+= ipchain__fprintf_graph(fp
, chain
, depth
,
969 ret
+= callchain__fprintf_graph(fp
, child
, new_total
,
971 new_depth_mask
| (1 << depth
));
975 if (callchain_param
.mode
== CHAIN_GRAPH_REL
&&
976 remaining
&& remaining
!= new_total
) {
981 new_depth_mask
&= ~(1 << (depth
- 1));
983 ret
+= ipchain__fprintf_graph(fp
, &rem_hits
, depth
,
984 new_depth_mask
, 0, new_total
,
992 callchain__fprintf_flat(FILE *fp
, struct callchain_node
*self
,
995 struct callchain_list
*chain
;
1001 ret
+= callchain__fprintf_flat(fp
, self
->parent
, total_samples
);
1004 list_for_each_entry(chain
, &self
->val
, list
) {
1005 if (chain
->ip
>= PERF_CONTEXT_MAX
)
1008 ret
+= fprintf(fp
, " %s\n", chain
->sym
->name
);
1010 ret
+= fprintf(fp
, " %p\n",
1011 (void *)(long)chain
->ip
);
1018 hist_entry_callchain__fprintf(FILE *fp
, struct hist_entry
*self
,
1021 struct rb_node
*rb_node
;
1022 struct callchain_node
*chain
;
1025 rb_node
= rb_first(&self
->sorted_chain
);
1029 chain
= rb_entry(rb_node
, struct callchain_node
, rb_node
);
1030 percent
= chain
->hit
* 100.0 / total_samples
;
1031 switch (callchain_param
.mode
) {
1033 ret
+= percent_color_fprintf(fp
, " %6.2f%%\n",
1035 ret
+= callchain__fprintf_flat(fp
, chain
, total_samples
);
1037 case CHAIN_GRAPH_ABS
: /* Falldown */
1038 case CHAIN_GRAPH_REL
:
1039 ret
+= callchain__fprintf_graph(fp
, chain
,
1040 total_samples
, 1, 1);
1044 ret
+= fprintf(fp
, "\n");
1045 rb_node
= rb_next(rb_node
);
1053 hist_entry__fprintf(FILE *fp
, struct hist_entry
*self
, u64 total_samples
)
1055 struct sort_entry
*se
;
1058 if (exclude_other
&& !self
->parent
)
1062 ret
= percent_color_fprintf(fp
,
1063 field_sep
? "%.2f" : " %6.2f%%",
1064 (self
->count
* 100.0) / total_samples
);
1066 ret
= fprintf(fp
, field_sep
? "%lld" : "%12lld ", self
->count
);
1068 if (show_nr_samples
) {
1070 fprintf(fp
, "%c%lld", *field_sep
, self
->count
);
1072 fprintf(fp
, "%11lld", self
->count
);
1075 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
1079 fprintf(fp
, "%s", field_sep
?: " ");
1080 ret
+= se
->print(fp
, self
, se
->width
? *se
->width
: 0);
1083 ret
+= fprintf(fp
, "\n");
1086 hist_entry_callchain__fprintf(fp
, self
, total_samples
);
1095 static void dso__calc_col_width(struct dso
*self
)
1097 if (!col_width_list_str
&& !field_sep
&&
1098 (!dso_list
|| strlist__has_entry(dso_list
, self
->name
))) {
1099 unsigned int slen
= strlen(self
->name
);
1100 if (slen
> dsos__col_width
)
1101 dsos__col_width
= slen
;
1104 self
->slen_calculated
= 1;
1107 static struct symbol
*
1108 resolve_symbol(struct thread
*thread
, struct map
**mapp
,
1109 struct dso
**dsop
, u64
*ipp
)
1111 struct dso
*dso
= dsop
? *dsop
: NULL
;
1112 struct map
*map
= mapp
? *mapp
: NULL
;
1124 map
= thread__find_map(thread
, ip
);
1127 * We have to do this here as we may have a dso
1128 * with no symbol hit that has a name longer than
1129 * the ones with symbols sampled.
1131 if (!sort_dso
.elide
&& !map
->dso
->slen_calculated
)
1132 dso__calc_col_width(map
->dso
);
1137 ip
= map
->map_ip(map
, ip
);
1142 * If this is outside of all known maps,
1143 * and is a negative address, try to look it
1144 * up in the kernel dso, as it might be a
1145 * vsyscall (which executes in user-mode):
1147 if ((long long)ip
< 0)
1150 dprintf(" ...... dso: %s\n", dso
? dso
->name
: "<not found>");
1151 dprintf(" ...... map: %Lx -> %Lx\n", *ipp
, ip
);
1160 return dso
->find_symbol(dso
, ip
);
1163 static int call__match(struct symbol
*sym
)
1165 if (sym
->name
&& !regexec(&parent_regex
, sym
->name
, 0, NULL
, 0))
1171 static struct symbol
**
1172 resolve_callchain(struct thread
*thread
, struct map
*map __used
,
1173 struct ip_callchain
*chain
, struct hist_entry
*entry
)
1175 u64 context
= PERF_CONTEXT_MAX
;
1176 struct symbol
**syms
= NULL
;
1180 syms
= calloc(chain
->nr
, sizeof(*syms
));
1182 fprintf(stderr
, "Can't allocate memory for symbols\n");
1187 for (i
= 0; i
< chain
->nr
; i
++) {
1188 u64 ip
= chain
->ips
[i
];
1189 struct dso
*dso
= NULL
;
1192 if (ip
>= PERF_CONTEXT_MAX
) {
1198 case PERF_CONTEXT_HV
:
1199 dso
= hypervisor_dso
;
1201 case PERF_CONTEXT_KERNEL
:
1208 sym
= resolve_symbol(thread
, NULL
, &dso
, &ip
);
1211 if (sort__has_parent
&& call__match(sym
) &&
1213 entry
->parent
= sym
;
1224 * collect histogram counts
1228 hist_entry__add(struct thread
*thread
, struct map
*map
, struct dso
*dso
,
1229 struct symbol
*sym
, u64 ip
, struct ip_callchain
*chain
,
1230 char level
, u64 count
)
1232 struct rb_node
**p
= &hist
.rb_node
;
1233 struct rb_node
*parent
= NULL
;
1234 struct hist_entry
*he
;
1235 struct symbol
**syms
= NULL
;
1236 struct hist_entry entry
= {
1245 .sorted_chain
= RB_ROOT
1249 if ((sort__has_parent
|| callchain
) && chain
)
1250 syms
= resolve_callchain(thread
, map
, chain
, &entry
);
1252 while (*p
!= NULL
) {
1254 he
= rb_entry(parent
, struct hist_entry
, rb_node
);
1256 cmp
= hist_entry__cmp(&entry
, he
);
1261 append_chain(&he
->callchain
, chain
, syms
);
1270 p
= &(*p
)->rb_right
;
1273 he
= malloc(sizeof(*he
));
1278 callchain_init(&he
->callchain
);
1279 append_chain(&he
->callchain
, chain
, syms
);
1282 rb_link_node(&he
->rb_node
, parent
, p
);
1283 rb_insert_color(&he
->rb_node
, &hist
);
1288 static void hist_entry__free(struct hist_entry
*he
)
1294 * collapse the histogram
1297 static struct rb_root collapse_hists
;
1299 static void collapse__insert_entry(struct hist_entry
*he
)
1301 struct rb_node
**p
= &collapse_hists
.rb_node
;
1302 struct rb_node
*parent
= NULL
;
1303 struct hist_entry
*iter
;
1306 while (*p
!= NULL
) {
1308 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1310 cmp
= hist_entry__collapse(iter
, he
);
1313 iter
->count
+= he
->count
;
1314 hist_entry__free(he
);
1321 p
= &(*p
)->rb_right
;
1324 rb_link_node(&he
->rb_node
, parent
, p
);
1325 rb_insert_color(&he
->rb_node
, &collapse_hists
);
1328 static void collapse__resort(void)
1330 struct rb_node
*next
;
1331 struct hist_entry
*n
;
1333 if (!sort__need_collapse
)
1336 next
= rb_first(&hist
);
1338 n
= rb_entry(next
, struct hist_entry
, rb_node
);
1339 next
= rb_next(&n
->rb_node
);
1341 rb_erase(&n
->rb_node
, &hist
);
1342 collapse__insert_entry(n
);
1347 * reverse the map, sort on count.
1350 static struct rb_root output_hists
;
1352 static void output__insert_entry(struct hist_entry
*he
, u64 min_callchain_hits
)
1354 struct rb_node
**p
= &output_hists
.rb_node
;
1355 struct rb_node
*parent
= NULL
;
1356 struct hist_entry
*iter
;
1359 callchain_param
.sort(&he
->sorted_chain
, &he
->callchain
,
1360 min_callchain_hits
, &callchain_param
);
1362 while (*p
!= NULL
) {
1364 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1366 if (he
->count
> iter
->count
)
1369 p
= &(*p
)->rb_right
;
1372 rb_link_node(&he
->rb_node
, parent
, p
);
1373 rb_insert_color(&he
->rb_node
, &output_hists
);
1376 static void output__resort(u64 total_samples
)
1378 struct rb_node
*next
;
1379 struct hist_entry
*n
;
1380 struct rb_root
*tree
= &hist
;
1381 u64 min_callchain_hits
;
1383 min_callchain_hits
= total_samples
* (callchain_param
.min_percent
/ 100);
1385 if (sort__need_collapse
)
1386 tree
= &collapse_hists
;
1388 next
= rb_first(tree
);
1391 n
= rb_entry(next
, struct hist_entry
, rb_node
);
1392 next
= rb_next(&n
->rb_node
);
1394 rb_erase(&n
->rb_node
, tree
);
1395 output__insert_entry(n
, min_callchain_hits
);
1399 static size_t output__fprintf(FILE *fp
, u64 total_samples
)
1401 struct hist_entry
*pos
;
1402 struct sort_entry
*se
;
1406 char *col_width
= col_width_list_str
;
1407 int raw_printing_style
;
1409 raw_printing_style
= !strcmp(pretty_printing_style
, "raw");
1413 fprintf(fp
, "# Samples: %Ld\n", (u64
)total_samples
);
1416 fprintf(fp
, "# Overhead");
1417 if (show_nr_samples
) {
1419 fprintf(fp
, "%cSamples", *field_sep
);
1421 fputs(" Samples ", fp
);
1423 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
1427 fprintf(fp
, "%c%s", *field_sep
, se
->header
);
1430 width
= strlen(se
->header
);
1432 if (col_width_list_str
) {
1434 *se
->width
= atoi(col_width
);
1435 col_width
= strchr(col_width
, ',');
1440 width
= *se
->width
= max(*se
->width
, width
);
1442 fprintf(fp
, " %*s", width
, se
->header
);
1449 fprintf(fp
, "# ........");
1450 if (show_nr_samples
)
1451 fprintf(fp
, " ..........");
1452 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
1462 width
= strlen(se
->header
);
1463 for (i
= 0; i
< width
; i
++)
1471 for (nd
= rb_first(&output_hists
); nd
; nd
= rb_next(nd
)) {
1472 pos
= rb_entry(nd
, struct hist_entry
, rb_node
);
1473 ret
+= hist_entry__fprintf(fp
, pos
, total_samples
);
1476 if (sort_order
== default_sort_order
&&
1477 parent_pattern
== default_parent_pattern
) {
1479 fprintf(fp
, "# (For a higher level overview, try: perf report --sort comm,dso)\n");
1484 free(rem_sq_bracket
);
1487 perf_read_values_display(fp
, &show_threads_values
,
1488 raw_printing_style
);
1493 static void register_idle_thread(void)
1495 struct thread
*thread
= threads__findnew(0);
1497 if (thread
== NULL
||
1498 thread__set_comm(thread
, "[idle]")) {
1499 fprintf(stderr
, "problem inserting idle task.\n");
1504 static unsigned long total
= 0,
1511 static int validate_chain(struct ip_callchain
*chain
, event_t
*event
)
1513 unsigned int chain_size
;
1515 chain_size
= event
->header
.size
;
1516 chain_size
-= (unsigned long)&event
->ip
.__more_data
- (unsigned long)event
;
1518 if (chain
->nr
*sizeof(u64
) > chain_size
)
1525 process_sample_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1529 struct dso
*dso
= NULL
;
1530 struct thread
*thread
= threads__findnew(event
->ip
.pid
);
1531 u64 ip
= event
->ip
.ip
;
1533 struct map
*map
= NULL
;
1534 void *more_data
= event
->ip
.__more_data
;
1535 struct ip_callchain
*chain
= NULL
;
1538 if (sample_type
& PERF_SAMPLE_PERIOD
) {
1539 period
= *(u64
*)more_data
;
1540 more_data
+= sizeof(u64
);
1543 dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
1544 (void *)(offset
+ head
),
1545 (void *)(long)(event
->header
.size
),
1551 if (sample_type
& PERF_SAMPLE_CALLCHAIN
) {
1554 chain
= (void *)more_data
;
1556 dprintf("... chain: nr:%Lu\n", chain
->nr
);
1558 if (validate_chain(chain
, event
) < 0) {
1559 eprintf("call-chain problem with event, skipping it.\n");
1564 for (i
= 0; i
< chain
->nr
; i
++)
1565 dprintf("..... %2d: %016Lx\n", i
, chain
->ips
[i
]);
1569 dprintf(" ... thread: %s:%d\n", thread
->comm
, thread
->pid
);
1571 if (thread
== NULL
) {
1572 eprintf("problem processing %d event, skipping it.\n",
1573 event
->header
.type
);
1577 if (comm_list
&& !strlist__has_entry(comm_list
, thread
->comm
))
1580 cpumode
= event
->header
.misc
& PERF_EVENT_MISC_CPUMODE_MASK
;
1582 if (cpumode
== PERF_EVENT_MISC_KERNEL
) {
1588 dprintf(" ...... dso: %s\n", dso
->name
);
1590 } else if (cpumode
== PERF_EVENT_MISC_USER
) {
1599 dso
= hypervisor_dso
;
1601 dprintf(" ...... dso: [hypervisor]\n");
1604 if (show
& show_mask
) {
1605 struct symbol
*sym
= resolve_symbol(thread
, &map
, &dso
, &ip
);
1607 if (dso_list
&& dso
&& dso
->name
&& !strlist__has_entry(dso_list
, dso
->name
))
1610 if (sym_list
&& sym
&& !strlist__has_entry(sym_list
, sym
->name
))
1613 if (hist_entry__add(thread
, map
, dso
, sym
, ip
, chain
, level
, period
)) {
1614 eprintf("problem incrementing symbol count, skipping event\n");
1624 process_mmap_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1626 struct thread
*thread
= threads__findnew(event
->mmap
.pid
);
1627 struct map
*map
= map__new(&event
->mmap
);
1629 dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
1630 (void *)(offset
+ head
),
1631 (void *)(long)(event
->header
.size
),
1633 (void *)(long)event
->mmap
.start
,
1634 (void *)(long)event
->mmap
.len
,
1635 (void *)(long)event
->mmap
.pgoff
,
1636 event
->mmap
.filename
);
1638 if (thread
== NULL
|| map
== NULL
) {
1639 dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
1643 thread__insert_map(thread
, map
);
1650 process_comm_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1652 struct thread
*thread
= threads__findnew(event
->comm
.pid
);
1654 dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
1655 (void *)(offset
+ head
),
1656 (void *)(long)(event
->header
.size
),
1657 event
->comm
.comm
, event
->comm
.pid
);
1659 if (thread
== NULL
||
1660 thread__set_comm(thread
, event
->comm
.comm
)) {
1661 dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
1670 process_task_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1672 struct thread
*thread
= threads__findnew(event
->fork
.pid
);
1673 struct thread
*parent
= threads__findnew(event
->fork
.ppid
);
1675 dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
1676 (void *)(offset
+ head
),
1677 (void *)(long)(event
->header
.size
),
1678 event
->header
.type
== PERF_EVENT_FORK
? "FORK" : "EXIT",
1679 event
->fork
.pid
, event
->fork
.tid
,
1680 event
->fork
.ppid
, event
->fork
.ptid
);
1683 * A thread clone will have the same PID for both
1686 if (thread
== parent
)
1689 if (event
->header
.type
== PERF_EVENT_EXIT
)
1692 if (!thread
|| !parent
|| thread__fork(thread
, parent
)) {
1693 dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
1702 process_lost_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1704 dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
1705 (void *)(offset
+ head
),
1706 (void *)(long)(event
->header
.size
),
1710 total_lost
+= event
->lost
.lost
;
1715 static void trace_event(event_t
*event
)
1717 unsigned char *raw_event
= (void *)event
;
1718 char *color
= PERF_COLOR_BLUE
;
1725 cdprintf("\n. ... raw event: size %d bytes\n", event
->header
.size
);
1727 for (i
= 0; i
< event
->header
.size
; i
++) {
1728 if ((i
& 15) == 0) {
1730 cdprintf(" %04x: ", i
);
1733 cdprintf(" %02x", raw_event
[i
]);
1735 if (((i
& 15) == 15) || i
== event
->header
.size
-1) {
1737 for (j
= 0; j
< 15-(i
& 15); j
++)
1739 for (j
= 0; j
< (i
& 15); j
++) {
1740 if (isprint(raw_event
[i
-15+j
]))
1741 cdprintf("%c", raw_event
[i
-15+j
]);
1751 static struct perf_header
*header
;
1753 static struct perf_counter_attr
*perf_header__find_attr(u64 id
)
1757 for (i
= 0; i
< header
->attrs
; i
++) {
1758 struct perf_header_attr
*attr
= header
->attr
[i
];
1761 for (j
= 0; j
< attr
->ids
; j
++) {
1762 if (attr
->id
[j
] == id
)
1771 process_read_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1773 struct perf_counter_attr
*attr
= perf_header__find_attr(event
->read
.id
);
1776 char *name
= attr
? __event_name(attr
->type
, attr
->config
)
1778 perf_read_values_add_value(&show_threads_values
,
1779 event
->read
.pid
, event
->read
.tid
,
1785 dprintf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
1786 (void *)(offset
+ head
),
1787 (void *)(long)(event
->header
.size
),
1790 attr
? __event_name(attr
->type
, attr
->config
)
1798 process_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1802 switch (event
->header
.type
) {
1803 case PERF_EVENT_SAMPLE
:
1804 return process_sample_event(event
, offset
, head
);
1806 case PERF_EVENT_MMAP
:
1807 return process_mmap_event(event
, offset
, head
);
1809 case PERF_EVENT_COMM
:
1810 return process_comm_event(event
, offset
, head
);
1812 case PERF_EVENT_FORK
:
1813 case PERF_EVENT_EXIT
:
1814 return process_task_event(event
, offset
, head
);
1816 case PERF_EVENT_LOST
:
1817 return process_lost_event(event
, offset
, head
);
1819 case PERF_EVENT_READ
:
1820 return process_read_event(event
, offset
, head
);
1823 * We don't process them right now but they are fine:
1826 case PERF_EVENT_THROTTLE
:
1827 case PERF_EVENT_UNTHROTTLE
:
1837 static u64
perf_header__sample_type(void)
1839 u64 sample_type
= 0;
1842 for (i
= 0; i
< header
->attrs
; i
++) {
1843 struct perf_header_attr
*attr
= header
->attr
[i
];
1846 sample_type
= attr
->attr
.sample_type
;
1847 else if (sample_type
!= attr
->attr
.sample_type
)
1848 die("non matching sample_type");
1854 static int __cmd_report(void)
1856 int ret
, rc
= EXIT_FAILURE
;
1857 unsigned long offset
= 0;
1858 unsigned long head
, shift
;
1864 register_idle_thread();
1867 perf_read_values_init(&show_threads_values
);
1869 input
= open(input_name
, O_RDONLY
);
1871 fprintf(stderr
, " failed to open file: %s", input_name
);
1872 if (!strcmp(input_name
, "perf.data"))
1873 fprintf(stderr
, " (try 'perf record' first)");
1874 fprintf(stderr
, "\n");
1878 ret
= fstat(input
, &stat
);
1880 perror("failed to stat file");
1884 if (!stat
.st_size
) {
1885 fprintf(stderr
, "zero-sized file, nothing to do!\n");
1889 header
= perf_header__read(input
);
1890 head
= header
->data_offset
;
1892 sample_type
= perf_header__sample_type();
1894 if (!(sample_type
& PERF_SAMPLE_CALLCHAIN
)) {
1895 if (sort__has_parent
) {
1896 fprintf(stderr
, "selected --sort parent, but no"
1897 " callchain data. Did you call"
1898 " perf record without -g?\n");
1902 fprintf(stderr
, "selected -c but no callchain data."
1903 " Did you call perf record without"
1907 } else if (callchain_param
.mode
!= CHAIN_NONE
&& !callchain
) {
1909 if (register_callchain_param(&callchain_param
) < 0) {
1910 fprintf(stderr
, "Can't register callchain"
1916 if (load_kernel() < 0) {
1917 perror("failed to load kernel symbols");
1918 return EXIT_FAILURE
;
1922 if (getcwd(__cwd
, sizeof(__cwd
)) == NULL
) {
1923 perror("failed to get the current directory");
1924 return EXIT_FAILURE
;
1926 cwdlen
= strlen(cwd
);
1932 shift
= page_size
* (head
/ page_size
);
1937 buf
= (char *)mmap(NULL
, page_size
* mmap_window
, PROT_READ
,
1938 MAP_SHARED
, input
, offset
);
1939 if (buf
== MAP_FAILED
) {
1940 perror("failed to mmap file");
1945 event
= (event_t
*)(buf
+ head
);
1947 size
= event
->header
.size
;
1951 if (head
+ event
->header
.size
>= page_size
* mmap_window
) {
1954 shift
= page_size
* (head
/ page_size
);
1956 ret
= munmap(buf
, page_size
* mmap_window
);
1964 size
= event
->header
.size
;
1966 dprintf("\n%p [%p]: event: %d\n",
1967 (void *)(offset
+ head
),
1968 (void *)(long)event
->header
.size
,
1969 event
->header
.type
);
1971 if (!size
|| process_event(event
, offset
, head
) < 0) {
1973 dprintf("%p [%p]: skipping unknown header type: %d\n",
1974 (void *)(offset
+ head
),
1975 (void *)(long)(event
->header
.size
),
1976 event
->header
.type
);
1981 * assume we lost track of the stream, check alignment, and
1982 * increment a single u64 in the hope to catch on again 'soon'.
1985 if (unlikely(head
& 7))
1993 if (offset
+ head
>= header
->data_offset
+ header
->data_size
)
1996 if (offset
+ head
< (unsigned long)stat
.st_size
)
2003 dprintf(" IP events: %10ld\n", total
);
2004 dprintf(" mmap events: %10ld\n", total_mmap
);
2005 dprintf(" comm events: %10ld\n", total_comm
);
2006 dprintf(" fork events: %10ld\n", total_fork
);
2007 dprintf(" lost events: %10ld\n", total_lost
);
2008 dprintf(" unknown events: %10ld\n", total_unknown
);
2014 threads__fprintf(stdout
);
2017 dsos__fprintf(stdout
);
2020 output__resort(total
);
2021 output__fprintf(stdout
, total
);
2024 perf_read_values_destroy(&show_threads_values
);
2030 parse_callchain_opt(const struct option
*opt __used
, const char *arg
,
2041 tok
= strtok((char *)arg
, ",");
2045 /* get the output mode */
2046 if (!strncmp(tok
, "graph", strlen(arg
)))
2047 callchain_param
.mode
= CHAIN_GRAPH_ABS
;
2049 else if (!strncmp(tok
, "flat", strlen(arg
)))
2050 callchain_param
.mode
= CHAIN_FLAT
;
2052 else if (!strncmp(tok
, "fractal", strlen(arg
)))
2053 callchain_param
.mode
= CHAIN_GRAPH_REL
;
2055 else if (!strncmp(tok
, "none", strlen(arg
))) {
2056 callchain_param
.mode
= CHAIN_NONE
;
2065 /* get the min percentage */
2066 tok
= strtok(NULL
, ",");
2070 callchain_param
.min_percent
= strtod(tok
, &endptr
);
2075 if (register_callchain_param(&callchain_param
) < 0) {
2076 fprintf(stderr
, "Can't register callchain params\n");
/* usage text displayed by parse_options()/usage_with_options() */
static const char * const report_usage[] = {
	"perf report [<options>] <command>",
	NULL
};
2087 static const struct option options
[] = {
2088 OPT_STRING('i', "input", &input_name
, "file",
2090 OPT_BOOLEAN('v', "verbose", &verbose
,
2091 "be more verbose (show symbol address, etc)"),
2092 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace
,
2093 "dump raw trace in ASCII"),
2094 OPT_STRING('k', "vmlinux", &vmlinux
, "file", "vmlinux pathname"),
2095 OPT_BOOLEAN('m', "modules", &modules
,
2096 "load module symbols - WARNING: use only with -k and LIVE kernel"),
2097 OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples
,
2098 "Show a column with the number of samples"),
2099 OPT_BOOLEAN('T', "threads", &show_threads
,
2100 "Show per-thread event counters"),
2101 OPT_STRING(0, "pretty", &pretty_printing_style
, "key",
2102 "pretty printing style key: normal raw"),
2103 OPT_STRING('s', "sort", &sort_order
, "key[,key2...]",
2104 "sort by key(s): pid, comm, dso, symbol, parent"),
2105 OPT_BOOLEAN('P', "full-paths", &full_paths
,
2106 "Don't shorten the pathnames taking into account the cwd"),
2107 OPT_STRING('p', "parent", &parent_pattern
, "regex",
2108 "regex filter to identify parent, see: '--sort parent'"),
2109 OPT_BOOLEAN('x', "exclude-other", &exclude_other
,
2110 "Only display entries with parent-match"),
2111 OPT_CALLBACK_DEFAULT('g', "call-graph", NULL
, "output_type,min_percent",
2112 "Display callchains using output_type and min percent threshold. "
2113 "Default: fractal,0.5", &parse_callchain_opt
, callchain_default_opt
),
2114 OPT_STRING('d', "dsos", &dso_list_str
, "dso[,dso...]",
2115 "only consider symbols in these dsos"),
2116 OPT_STRING('C', "comms", &comm_list_str
, "comm[,comm...]",
2117 "only consider symbols in these comms"),
2118 OPT_STRING('S', "symbols", &sym_list_str
, "symbol[,symbol...]",
2119 "only consider these symbols"),
2120 OPT_STRING('w', "column-widths", &col_width_list_str
,
2122 "don't try to adjust column width, use these fixed values"),
2123 OPT_STRING('t', "field-separator", &field_sep
, "separator",
2124 "separator for columns, no spaces will be added between "
2125 "columns '.' is reserved."),
2129 static void setup_sorting(void)
2131 char *tmp
, *tok
, *str
= strdup(sort_order
);
2133 for (tok
= strtok_r(str
, ", ", &tmp
);
2134 tok
; tok
= strtok_r(NULL
, ", ", &tmp
)) {
2135 if (sort_dimension__add(tok
) < 0) {
2136 error("Unknown --sort key: `%s'", tok
);
2137 usage_with_options(report_usage
, options
);
2144 static void setup_list(struct strlist
**list
, const char *list_str
,
2145 struct sort_entry
*se
, const char *list_name
,
2149 *list
= strlist__new(true, list_str
);
2151 fprintf(stderr
, "problems parsing %s list\n",
2155 if (strlist__nr_entries(*list
) == 1) {
2156 fprintf(fp
, "# %s: %s\n", list_name
,
2157 strlist__entry(*list
, 0)->s
);
2163 int cmd_report(int argc
, const char **argv
, const char *prefix __used
)
2167 page_size
= getpagesize();
2169 argc
= parse_options(argc
, argv
, options
, report_usage
, 0);
2173 if (parent_pattern
!= default_parent_pattern
) {
2174 sort_dimension__add("parent");
2175 sort_parent
.elide
= 1;
2180 * Any (unrecognized) arguments left?
2183 usage_with_options(report_usage
, options
);
2187 setup_list(&dso_list
, dso_list_str
, &sort_dso
, "dso", stdout
);
2188 setup_list(&comm_list
, comm_list_str
, &sort_comm
, "comm", stdout
);
2189 setup_list(&sym_list
, sym_list_str
, &sort_sym
, "symbol", stdout
);
2191 if (field_sep
&& *field_sep
== '.') {
2192 fputs("'.' is the only non valid --field-separator argument\n",
2197 return __cmd_report();