Merge commit 'tip/perfcounters/core' into perf-counters-for-linus
[linux-2.6/verdex.git] / tools / perf / builtin-report.c
blob a118bc77286d1d16780c08f348aa9b370225fab8
1 /*
2 * builtin-report.c
4 * Builtin report command: Analyze the perf.data input file,
5 * look up and read DSOs and symbol information and display
6 * a histogram of results, along various sorting keys.
7 */
8 #include "builtin.h"
10 #include "util/util.h"
12 #include "util/color.h"
13 #include <linux/list.h>
14 #include "util/cache.h"
15 #include <linux/rbtree.h>
16 #include "util/symbol.h"
17 #include "util/string.h"
18 #include "util/callchain.h"
19 #include "util/strlist.h"
21 #include "perf.h"
22 #include "util/header.h"
24 #include "util/parse-options.h"
25 #include "util/parse-events.h"
27 #define SHOW_KERNEL 1
28 #define SHOW_USER 2
29 #define SHOW_HV 4
31 static char const *input_name = "perf.data";
32 static char *vmlinux = NULL;
34 static char default_sort_order[] = "comm,dso";
35 static char *sort_order = default_sort_order;
36 static char *dso_list_str, *comm_list_str, *sym_list_str,
37 *col_width_list_str;
38 static struct strlist *dso_list, *comm_list, *sym_list;
39 static char *field_sep;
41 static int input;
42 static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
44 static int dump_trace = 0;
45 #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
46 #define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
48 static int verbose;
49 #define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
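/*
 * Debug output helpers: dprintf()/cdprintf() only print when -D
 * (--dump-raw-trace) is given, eprintf() only prints when --verbose is set.
 */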
51 static int modules;
53 static int full_paths;
54 static int show_nr_samples;
56 static unsigned long page_size;
57 static unsigned long mmap_window = 32;
59 static char default_parent_pattern[] = "^sys_|^do_page_fault";
60 static char *parent_pattern = default_parent_pattern;
61 static regex_t parent_regex;
63 static int exclude_other = 1;
65 static char callchain_default_opt[] = "fractal,0.5";
67 static int callchain;
69 static
70 struct callchain_param callchain_param = {
71 .mode = CHAIN_GRAPH_ABS,
72 .min_percent = 0.5
75 static u64 sample_type;
77 struct ip_event {
78 struct perf_event_header header;
79 u64 ip;
80 u32 pid, tid;
81 unsigned char __more_data[];
84 struct mmap_event {
85 struct perf_event_header header;
86 u32 pid, tid;
87 u64 start;
88 u64 len;
89 u64 pgoff;
90 char filename[PATH_MAX];
93 struct comm_event {
94 struct perf_event_header header;
95 u32 pid, tid;
96 char comm[16];
99 struct fork_event {
100 struct perf_event_header header;
101 u32 pid, ppid;
104 struct period_event {
105 struct perf_event_header header;
106 u64 time;
107 u64 id;
108 u64 sample_period;
111 struct lost_event {
112 struct perf_event_header header;
113 u64 id;
114 u64 lost;
117 struct read_event {
118 struct perf_event_header header;
119 u32 pid, tid;
120 u64 value;
121 u64 format[3];
124 typedef union event_union {
125 struct perf_event_header header;
126 struct ip_event ip;
127 struct mmap_event mmap;
128 struct comm_event comm;
129 struct fork_event fork;
130 struct period_event period;
131 struct lost_event lost;
132 struct read_event read;
133 } event_t;
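/*
 * The structs above mirror the records found in the perf.data stream;
 * event_t is the union used to walk them.
 *
 * repsep_fprintf() is fprintf() with a twist: when a --field-separator is
 * in use, any occurrence of that separator character in the formatted
 * output is replaced by '.' so the columns stay parseable.
 */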
135 static int repsep_fprintf(FILE *fp, const char *fmt, ...)
137 int n;
138 va_list ap;
140 va_start(ap, fmt);
141 if (!field_sep)
142 n = vfprintf(fp, fmt, ap);
143 else {
144 char *bf = NULL;
145 n = vasprintf(&bf, fmt, ap);
146 if (n > 0) {
147 char *sep = bf;
148 while (1) {
149 sep = strchr(sep, *field_sep);
150 if (sep == NULL)
151 break;
152 *sep = '.';
155 fputs(bf, fp);
156 free(bf);
158 va_end(ap);
159 return n;
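/*
 * Global list of DSOs seen so far. [kernel], [vdso] and [hypervisor]
 * get dedicated entries, created in load_kernel().
 */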
162 static LIST_HEAD(dsos);
163 static struct dso *kernel_dso;
164 static struct dso *vdso;
165 static struct dso *hypervisor_dso;
167 static void dsos__add(struct dso *dso)
169 list_add_tail(&dso->node, &dsos);
172 static struct dso *dsos__find(const char *name)
174 struct dso *pos;
176 list_for_each_entry(pos, &dsos, node)
177 if (strcmp(pos->name, name) == 0)
178 return pos;
179 return NULL;
182 static struct dso *dsos__findnew(const char *name)
184 struct dso *dso = dsos__find(name);
185 int nr;
187 if (dso)
188 return dso;
190 dso = dso__new(name, 0);
191 if (!dso)
192 goto out_delete_dso;
194 nr = dso__load(dso, NULL, verbose);
195 if (nr < 0) {
196 eprintf("Failed to open: %s\n", name);
197 goto out_delete_dso;
199 if (!nr)
200 eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
202 dsos__add(dso);
204 return dso;
206 out_delete_dso:
207 dso__delete(dso);
208 return NULL;
211 static void dsos__fprintf(FILE *fp)
213 struct dso *pos;
215 list_for_each_entry(pos, &dsos, node)
216 dso__fprintf(pos, fp);
219 static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
221 return dso__find_symbol(dso, ip);
224 static int load_kernel(void)
226 int err;
228 kernel_dso = dso__new("[kernel]", 0);
229 if (!kernel_dso)
230 return -1;
232 err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
233 if (err <= 0) {
234 dso__delete(kernel_dso);
235 kernel_dso = NULL;
236 } else
237 dsos__add(kernel_dso);
239 vdso = dso__new("[vdso]", 0);
240 if (!vdso)
241 return -1;
243 vdso->find_symbol = vdso__find_symbol;
245 dsos__add(vdso);
247 hypervisor_dso = dso__new("[hypervisor]", 0);
248 if (!hypervisor_dso)
249 return -1;
250 dsos__add(hypervisor_dso);
252 return err;
255 static char __cwd[PATH_MAX];
256 static char *cwd = __cwd;
257 static int cwdlen;
259 static int strcommon(const char *pathname)
261 int n = 0;
263 while (pathname[n] == cwd[n] && n < cwdlen)
264 ++n;
266 return n;
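/*
 * A struct map describes one mmap'd region of a thread. map_ip()
 * translates a sampled ip into a dso-relative address: the vdso (and
 * anonymous maps) use the identity mapping, everything else subtracts
 * the map start and adds the file offset.
 */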
269 struct map {
270 struct list_head node;
271 u64 start;
272 u64 end;
273 u64 pgoff;
274 u64 (*map_ip)(struct map *, u64);
275 struct dso *dso;
278 static u64 map__map_ip(struct map *map, u64 ip)
280 return ip - map->start + map->pgoff;
283 static u64 vdso__map_ip(struct map *map __used, u64 ip)
285 return ip;
288 static inline int is_anon_memory(const char *filename)
290 return strcmp(filename, "//anon") == 0;
293 static struct map *map__new(struct mmap_event *event)
295 struct map *self = malloc(sizeof(*self));
297 if (self != NULL) {
298 const char *filename = event->filename;
299 char newfilename[PATH_MAX];
300 int anon;
302 if (cwd) {
303 int n = strcommon(filename);
305 if (n == cwdlen) {
306 snprintf(newfilename, sizeof(newfilename),
307 ".%s", filename + n);
308 filename = newfilename;
312 anon = is_anon_memory(filename);
314 if (anon) {
315 snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
316 filename = newfilename;
319 self->start = event->start;
320 self->end = event->start + event->len;
321 self->pgoff = event->pgoff;
323 self->dso = dsos__findnew(filename);
324 if (self->dso == NULL)
325 goto out_delete;
327 if (self->dso == vdso || anon)
328 self->map_ip = vdso__map_ip;
329 else
330 self->map_ip = map__map_ip;
332 return self;
333 out_delete:
334 free(self);
335 return NULL;
338 static struct map *map__clone(struct map *self)
340 struct map *map = malloc(sizeof(*self));
342 if (!map)
343 return NULL;
345 memcpy(map, self, sizeof(*self));
347 return map;
350 static int map__overlap(struct map *l, struct map *r)
352 if (l->start > r->start) {
353 struct map *t = l;
354 l = r;
355 r = t;
358 if (l->end > r->start)
359 return 1;
361 return 0;
364 static size_t map__fprintf(struct map *self, FILE *fp)
366 return fprintf(fp, " %Lx-%Lx %Lx %s\n",
367 self->start, self->end, self->pgoff, self->dso->name);
371 struct thread {
372 struct rb_node rb_node;
373 struct list_head maps;
374 pid_t pid;
375 char *comm;
378 static struct thread *thread__new(pid_t pid)
380 struct thread *self = malloc(sizeof(*self));
382 if (self != NULL) {
383 self->pid = pid;
384 self->comm = malloc(32);
385 if (self->comm)
386 snprintf(self->comm, 32, ":%d", self->pid);
387 INIT_LIST_HEAD(&self->maps);
390 return self;
393 static unsigned int dsos__col_width,
394 comms__col_width,
395 threads__col_width;
397 static int thread__set_comm(struct thread *self, const char *comm)
399 if (self->comm)
400 free(self->comm);
401 self->comm = strdup(comm);
402 if (!self->comm)
403 return -ENOMEM;
405 if (!col_width_list_str && !field_sep &&
406 (!comm_list || strlist__has_entry(comm_list, comm))) {
407 unsigned int slen = strlen(comm);
408 if (slen > comms__col_width) {
409 comms__col_width = slen;
410 threads__col_width = slen + 6;
414 return 0;
417 static size_t thread__fprintf(struct thread *self, FILE *fp)
419 struct map *pos;
420 size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
422 list_for_each_entry(pos, &self->maps, node)
423 ret += map__fprintf(pos, fp);
425 return ret;
429 static struct rb_root threads;
430 static struct thread *last_match;
432 static struct thread *threads__findnew(pid_t pid)
434 struct rb_node **p = &threads.rb_node;
435 struct rb_node *parent = NULL;
436 struct thread *th;
439 * Front-end cache - PID lookups come in blocks,
440 * so most of the time we don't have to look up
441 * the full rbtree:
443 if (last_match && last_match->pid == pid)
444 return last_match;
446 while (*p != NULL) {
447 parent = *p;
448 th = rb_entry(parent, struct thread, rb_node);
450 if (th->pid == pid) {
451 last_match = th;
452 return th;
455 if (pid < th->pid)
456 p = &(*p)->rb_left;
457 else
458 p = &(*p)->rb_right;
461 th = thread__new(pid);
462 if (th != NULL) {
463 rb_link_node(&th->rb_node, parent, p);
464 rb_insert_color(&th->rb_node, &threads);
465 last_match = th;
468 return th;
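/*
 * Insert a new map into the thread's map list, trimming or dropping any
 * existing maps it overlaps so that lookups by ip stay unambiguous.
 */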
471 static void thread__insert_map(struct thread *self, struct map *map)
473 struct map *pos, *tmp;
475 list_for_each_entry_safe(pos, tmp, &self->maps, node) {
476 if (map__overlap(pos, map)) {
477 if (verbose >= 2) {
478 printf("overlapping maps:\n");
479 map__fprintf(map, stdout);
480 map__fprintf(pos, stdout);
483 if (map->start <= pos->start && map->end > pos->start)
484 pos->start = map->end;
486 if (map->end >= pos->end && map->start < pos->end)
487 pos->end = map->start;
489 if (verbose >= 2) {
490 printf("after collision:\n");
491 map__fprintf(pos, stdout);
494 if (pos->start >= pos->end) {
495 list_del_init(&pos->node);
496 free(pos);
501 list_add_tail(&map->node, &self->maps);
504 static int thread__fork(struct thread *self, struct thread *parent)
506 struct map *map;
508 if (self->comm)
509 free(self->comm);
510 self->comm = strdup(parent->comm);
511 if (!self->comm)
512 return -ENOMEM;
514 list_for_each_entry(map, &parent->maps, node) {
515 struct map *new = map__clone(map);
516 if (!new)
517 return -ENOMEM;
518 thread__insert_map(self, new);
521 return 0;
524 static struct map *thread__find_map(struct thread *self, u64 ip)
526 struct map *pos;
528 if (self == NULL)
529 return NULL;
531 list_for_each_entry(pos, &self->maps, node)
532 if (ip >= pos->start && ip <= pos->end)
533 return pos;
535 return NULL;
538 static size_t threads__fprintf(FILE *fp)
540 size_t ret = 0;
541 struct rb_node *nd;
543 for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
544 struct thread *pos = rb_entry(nd, struct thread, rb_node);
546 ret += thread__fprintf(pos, fp);
549 return ret;
553 * histogram, sorted on item, collects counts
556 static struct rb_root hist;
558 struct hist_entry {
559 struct rb_node rb_node;
561 struct thread *thread;
562 struct map *map;
563 struct dso *dso;
564 struct symbol *sym;
565 struct symbol *parent;
566 u64 ip;
567 char level;
568 struct callchain_node callchain;
569 struct rb_root sorted_chain;
571 u64 count;
575 * configurable sorting bits
578 struct sort_entry {
579 struct list_head list;
581 char *header;
583 int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
584 int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
585 size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width);
586 unsigned int *width;
587 bool elide;
590 static int64_t cmp_null(void *l, void *r)
592 if (!l && !r)
593 return 0;
594 else if (!l)
595 return -1;
596 else
597 return 1;
600 /* --sort pid */
602 static int64_t
603 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
605 return right->thread->pid - left->thread->pid;
608 static size_t
609 sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
611 return repsep_fprintf(fp, "%*s:%5d", width - 6,
612 self->thread->comm ?: "", self->thread->pid);
615 static struct sort_entry sort_thread = {
616 .header = "Command: Pid",
617 .cmp = sort__thread_cmp,
618 .print = sort__thread_print,
619 .width = &threads__col_width,
622 /* --sort comm */
624 static int64_t
625 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
627 return right->thread->pid - left->thread->pid;
630 static int64_t
631 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
633 char *comm_l = left->thread->comm;
634 char *comm_r = right->thread->comm;
636 if (!comm_l || !comm_r)
637 return cmp_null(comm_l, comm_r);
639 return strcmp(comm_l, comm_r);
642 static size_t
643 sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
645 return repsep_fprintf(fp, "%*s", width, self->thread->comm);
648 static struct sort_entry sort_comm = {
649 .header = "Command",
650 .cmp = sort__comm_cmp,
651 .collapse = sort__comm_collapse,
652 .print = sort__comm_print,
653 .width = &comms__col_width,
656 /* --sort dso */
658 static int64_t
659 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
661 struct dso *dso_l = left->dso;
662 struct dso *dso_r = right->dso;
664 if (!dso_l || !dso_r)
665 return cmp_null(dso_l, dso_r);
667 return strcmp(dso_l->name, dso_r->name);
670 static size_t
671 sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
673 if (self->dso)
674 return repsep_fprintf(fp, "%-*s", width, self->dso->name);
676 return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
679 static struct sort_entry sort_dso = {
680 .header = "Shared Object",
681 .cmp = sort__dso_cmp,
682 .print = sort__dso_print,
683 .width = &dsos__col_width,
686 /* --sort symbol */
688 static int64_t
689 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
691 u64 ip_l, ip_r;
693 if (left->sym == right->sym)
694 return 0;
696 ip_l = left->sym ? left->sym->start : left->ip;
697 ip_r = right->sym ? right->sym->start : right->ip;
699 return (int64_t)(ip_r - ip_l);
702 static size_t
703 sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
705 size_t ret = 0;
707 if (verbose)
708 ret += repsep_fprintf(fp, "%#018llx ", (u64)self->ip);
710 ret += repsep_fprintf(fp, "[%c] ", self->level);
711 if (self->sym) {
712 ret += repsep_fprintf(fp, "%s", self->sym->name);
714 if (self->sym->module)
715 ret += repsep_fprintf(fp, "\t[%s]",
716 self->sym->module->name);
717 } else {
718 ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
721 return ret;
724 static struct sort_entry sort_sym = {
725 .header = "Symbol",
726 .cmp = sort__sym_cmp,
727 .print = sort__sym_print,
730 /* --sort parent */
732 static int64_t
733 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
735 struct symbol *sym_l = left->parent;
736 struct symbol *sym_r = right->parent;
738 if (!sym_l || !sym_r)
739 return cmp_null(sym_l, sym_r);
741 return strcmp(sym_l->name, sym_r->name);
744 static size_t
745 sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
747 return repsep_fprintf(fp, "%-*s", width,
748 self->parent ? self->parent->name : "[other]");
751 static unsigned int parent_symbol__col_width;
753 static struct sort_entry sort_parent = {
754 .header = "Parent symbol",
755 .cmp = sort__parent_cmp,
756 .print = sort__parent_print,
757 .width = &parent_symbol__col_width,
760 static int sort__need_collapse = 0;
761 static int sort__has_parent = 0;
763 struct sort_dimension {
764 char *name;
765 struct sort_entry *entry;
766 int taken;
769 static struct sort_dimension sort_dimensions[] = {
770 { .name = "pid", .entry = &sort_thread, },
771 { .name = "comm", .entry = &sort_comm, },
772 { .name = "dso", .entry = &sort_dso, },
773 { .name = "symbol", .entry = &sort_sym, },
774 { .name = "parent", .entry = &sort_parent, },
777 static LIST_HEAD(hist_entry__sort_list);
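/*
 * Match a --sort token against the table above (case-insensitive prefix
 * match) and append the corresponding sort_entry to the list consumed by
 * hist_entry__cmp() and hist_entry__collapse().
 */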
779 static int sort_dimension__add(char *tok)
781 unsigned int i;
783 for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
784 struct sort_dimension *sd = &sort_dimensions[i];
786 if (sd->taken)
787 continue;
789 if (strncasecmp(tok, sd->name, strlen(tok)))
790 continue;
792 if (sd->entry->collapse)
793 sort__need_collapse = 1;
795 if (sd->entry == &sort_parent) {
796 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
797 if (ret) {
798 char err[BUFSIZ];
800 regerror(ret, &parent_regex, err, sizeof(err));
801 fprintf(stderr, "Invalid regex: %s\n%s",
802 parent_pattern, err);
803 exit(-1);
805 sort__has_parent = 1;
808 list_add_tail(&sd->entry->list, &hist_entry__sort_list);
809 sd->taken = 1;
811 return 0;
814 return -ESRCH;
817 static int64_t
818 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
820 struct sort_entry *se;
821 int64_t cmp = 0;
823 list_for_each_entry(se, &hist_entry__sort_list, list) {
824 cmp = se->cmp(left, right);
825 if (cmp)
826 break;
829 return cmp;
832 static int64_t
833 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
835 struct sort_entry *se;
836 int64_t cmp = 0;
838 list_for_each_entry(se, &hist_entry__sort_list, list) {
839 int64_t (*f)(struct hist_entry *, struct hist_entry *);
841 f = se->collapse ?: se->cmp;
843 cmp = f(left, right);
844 if (cmp)
845 break;
848 return cmp;
851 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
853 int i;
854 size_t ret = 0;
856 ret += fprintf(fp, "%s", " ");
858 for (i = 0; i < depth; i++)
859 if (depth_mask & (1 << i))
860 ret += fprintf(fp, "| ");
861 else
862 ret += fprintf(fp, " ");
864 ret += fprintf(fp, "\n");
866 return ret;
868 static size_t
869 ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth,
870 int depth_mask, int count, u64 total_samples,
871 int hits)
873 int i;
874 size_t ret = 0;
876 ret += fprintf(fp, "%s", " ");
877 for (i = 0; i < depth; i++) {
878 if (depth_mask & (1 << i))
879 ret += fprintf(fp, "|");
880 else
881 ret += fprintf(fp, " ");
882 if (!count && i == depth - 1) {
883 double percent;
885 percent = hits * 100.0 / total_samples;
886 ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
887 } else
888 ret += fprintf(fp, "%s", " ");
890 if (chain->sym)
891 ret += fprintf(fp, "%s\n", chain->sym->name);
892 else
893 ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
895 return ret;
898 static size_t
899 callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
900 u64 total_samples, int depth, int depth_mask)
902 struct rb_node *node, *next;
903 struct callchain_node *child;
904 struct callchain_list *chain;
905 int new_depth_mask = depth_mask;
906 u64 new_total;
907 size_t ret = 0;
908 int i;
910 if (callchain_param.mode == CHAIN_GRAPH_REL)
911 new_total = self->cumul_hit;
912 else
913 new_total = total_samples;
915 node = rb_first(&self->rb_root);
916 while (node) {
917 child = rb_entry(node, struct callchain_node, rb_node);
920 * The depth mask manages the output of pipes that show
921 * the depth. We don't want to keep the pipes of the current
922 * level for the last child of this depth
924 next = rb_next(node);
925 if (!next)
926 new_depth_mask &= ~(1 << (depth - 1));
929 * But we keep the older depth mask for the line separator
930 * to keep the level link until we reach the last child
932 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask);
933 i = 0;
934 list_for_each_entry(chain, &child->val, list) {
935 if (chain->ip >= PERF_CONTEXT_MAX)
936 continue;
937 ret += ipchain__fprintf_graph(fp, chain, depth,
938 new_depth_mask, i++,
939 new_total,
940 child->cumul_hit);
942 ret += callchain__fprintf_graph(fp, child, new_total,
943 depth + 1,
944 new_depth_mask | (1 << depth));
945 node = next;
948 return ret;
951 static size_t
952 callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
953 u64 total_samples)
955 struct callchain_list *chain;
956 size_t ret = 0;
958 if (!self)
959 return 0;
961 ret += callchain__fprintf_flat(fp, self->parent, total_samples);
964 list_for_each_entry(chain, &self->val, list) {
965 if (chain->ip >= PERF_CONTEXT_MAX)
966 continue;
967 if (chain->sym)
968 ret += fprintf(fp, " %s\n", chain->sym->name);
969 else
970 ret += fprintf(fp, " %p\n",
971 (void *)(long)chain->ip);
974 return ret;
977 static size_t
978 hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
979 u64 total_samples)
981 struct rb_node *rb_node;
982 struct callchain_node *chain;
983 size_t ret = 0;
985 rb_node = rb_first(&self->sorted_chain);
986 while (rb_node) {
987 double percent;
989 chain = rb_entry(rb_node, struct callchain_node, rb_node);
990 percent = chain->hit * 100.0 / total_samples;
991 switch (callchain_param.mode) {
992 case CHAIN_FLAT:
993 ret += percent_color_fprintf(fp, " %6.2f%%\n",
994 percent);
995 ret += callchain__fprintf_flat(fp, chain, total_samples);
996 break;
997 case CHAIN_GRAPH_ABS: /* Fall through */
998 case CHAIN_GRAPH_REL:
999 ret += callchain__fprintf_graph(fp, chain,
1000 total_samples, 1, 1);
1001 default:
1002 break;
1004 ret += fprintf(fp, "\n");
1005 rb_node = rb_next(rb_node);
1008 return ret;
1012 static size_t
1013 hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
1015 struct sort_entry *se;
1016 size_t ret;
1018 if (exclude_other && !self->parent)
1019 return 0;
1021 if (total_samples)
1022 ret = percent_color_fprintf(fp,
1023 field_sep ? "%.2f" : " %6.2f%%",
1024 (self->count * 100.0) / total_samples);
1025 else
1026 ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count);
1028 if (show_nr_samples) {
1029 if (field_sep)
1030 fprintf(fp, "%c%lld", *field_sep, self->count);
1031 else
1032 fprintf(fp, "%11lld", self->count);
1035 list_for_each_entry(se, &hist_entry__sort_list, list) {
1036 if (se->elide)
1037 continue;
1039 fprintf(fp, "%s", field_sep ?: " ");
1040 ret += se->print(fp, self, se->width ? *se->width : 0);
1043 ret += fprintf(fp, "\n");
1045 if (callchain)
1046 hist_entry_callchain__fprintf(fp, self, total_samples);
1048 return ret;
1055 static void dso__calc_col_width(struct dso *self)
1057 if (!col_width_list_str && !field_sep &&
1058 (!dso_list || strlist__has_entry(dso_list, self->name))) {
1059 unsigned int slen = strlen(self->name);
1060 if (slen > dsos__col_width)
1061 dsos__col_width = slen;
1064 self->slen_calculated = 1;
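/*
 * Resolve an ip to a symbol: find the map covering it in the thread,
 * translate it to a dso-relative address, and fall back to the kernel
 * dso for negative addresses outside all known maps (vsyscall).
 */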
1067 static struct symbol *
1068 resolve_symbol(struct thread *thread, struct map **mapp,
1069 struct dso **dsop, u64 *ipp)
1071 struct dso *dso = dsop ? *dsop : NULL;
1072 struct map *map = mapp ? *mapp : NULL;
1073 u64 ip = *ipp;
1075 if (!thread)
1076 return NULL;
1078 if (dso)
1079 goto got_dso;
1081 if (map)
1082 goto got_map;
1084 map = thread__find_map(thread, ip);
1085 if (map != NULL) {
1087 * We have to do this here as we may have a dso
1088 * with no symbol hit that has a name longer than
1089 * the ones with symbols sampled.
1091 if (!sort_dso.elide && !map->dso->slen_calculated)
1092 dso__calc_col_width(map->dso);
1094 if (mapp)
1095 *mapp = map;
1096 got_map:
1097 ip = map->map_ip(map, ip);
1099 dso = map->dso;
1100 } else {
1102 * If this is outside of all known maps,
1103 * and is a negative address, try to look it
1104 * up in the kernel dso, as it might be a
1105 * vsyscall (which executes in user-mode):
1107 if ((long long)ip < 0)
1108 dso = kernel_dso;
1110 dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
1111 dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
1112 *ipp = ip;
1114 if (dsop)
1115 *dsop = dso;
1117 if (!dso)
1118 return NULL;
1119 got_dso:
1120 return dso->find_symbol(dso, ip);
1123 static int call__match(struct symbol *sym)
1125 if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
1126 return 1;
1128 return 0;
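/*
 * Walk the sampled callchain: PERF_CONTEXT_* markers select which dso
 * (kernel, hypervisor, user) the following ips belong to, the first ip
 * matching the parent pattern is recorded as entry->parent, and with -g
 * the resolved symbols are collected for append_chain().
 */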
1131 static struct symbol **
1132 resolve_callchain(struct thread *thread, struct map *map __used,
1133 struct ip_callchain *chain, struct hist_entry *entry)
1135 u64 context = PERF_CONTEXT_MAX;
1136 struct symbol **syms = NULL;
1137 unsigned int i;
1139 if (callchain) {
1140 syms = calloc(chain->nr, sizeof(*syms));
1141 if (!syms) {
1142 fprintf(stderr, "Can't allocate memory for symbols\n");
1143 exit(-1);
1147 for (i = 0; i < chain->nr; i++) {
1148 u64 ip = chain->ips[i];
1149 struct dso *dso = NULL;
1150 struct symbol *sym;
1152 if (ip >= PERF_CONTEXT_MAX) {
1153 context = ip;
1154 continue;
1157 switch (context) {
1158 case PERF_CONTEXT_HV:
1159 dso = hypervisor_dso;
1160 break;
1161 case PERF_CONTEXT_KERNEL:
1162 dso = kernel_dso;
1163 break;
1164 default:
1165 break;
1168 sym = resolve_symbol(thread, NULL, &dso, &ip);
1170 if (sym) {
1171 if (sort__has_parent && call__match(sym) &&
1172 !entry->parent)
1173 entry->parent = sym;
1174 if (!callchain)
1175 break;
1176 syms[i] = sym;
1180 return syms;
1184 * collect histogram counts
1187 static int
1188 hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
1189 struct symbol *sym, u64 ip, struct ip_callchain *chain,
1190 char level, u64 count)
1192 struct rb_node **p = &hist.rb_node;
1193 struct rb_node *parent = NULL;
1194 struct hist_entry *he;
1195 struct symbol **syms = NULL;
1196 struct hist_entry entry = {
1197 .thread = thread,
1198 .map = map,
1199 .dso = dso,
1200 .sym = sym,
1201 .ip = ip,
1202 .level = level,
1203 .count = count,
1204 .parent = NULL,
1205 .sorted_chain = RB_ROOT
1207 int cmp;
1209 if ((sort__has_parent || callchain) && chain)
1210 syms = resolve_callchain(thread, map, chain, &entry);
1212 while (*p != NULL) {
1213 parent = *p;
1214 he = rb_entry(parent, struct hist_entry, rb_node);
1216 cmp = hist_entry__cmp(&entry, he);
1218 if (!cmp) {
1219 he->count += count;
1220 if (callchain) {
1221 append_chain(&he->callchain, chain, syms);
1222 free(syms);
1224 return 0;
1227 if (cmp < 0)
1228 p = &(*p)->rb_left;
1229 else
1230 p = &(*p)->rb_right;
1233 he = malloc(sizeof(*he));
1234 if (!he)
1235 return -ENOMEM;
1236 *he = entry;
1237 if (callchain) {
1238 callchain_init(&he->callchain);
1239 append_chain(&he->callchain, chain, syms);
1240 free(syms);
1242 rb_link_node(&he->rb_node, parent, p);
1243 rb_insert_color(&he->rb_node, &hist);
1245 return 0;
1248 static void hist_entry__free(struct hist_entry *he)
1250 free(he);
1254 * collapse the histogram
1257 static struct rb_root collapse_hists;
1259 static void collapse__insert_entry(struct hist_entry *he)
1261 struct rb_node **p = &collapse_hists.rb_node;
1262 struct rb_node *parent = NULL;
1263 struct hist_entry *iter;
1264 int64_t cmp;
1266 while (*p != NULL) {
1267 parent = *p;
1268 iter = rb_entry(parent, struct hist_entry, rb_node);
1270 cmp = hist_entry__collapse(iter, he);
1272 if (!cmp) {
1273 iter->count += he->count;
1274 hist_entry__free(he);
1275 return;
1278 if (cmp < 0)
1279 p = &(*p)->rb_left;
1280 else
1281 p = &(*p)->rb_right;
1284 rb_link_node(&he->rb_node, parent, p);
1285 rb_insert_color(&he->rb_node, &collapse_hists);
1288 static void collapse__resort(void)
1290 struct rb_node *next;
1291 struct hist_entry *n;
1293 if (!sort__need_collapse)
1294 return;
1296 next = rb_first(&hist);
1297 while (next) {
1298 n = rb_entry(next, struct hist_entry, rb_node);
1299 next = rb_next(&n->rb_node);
1301 rb_erase(&n->rb_node, &hist);
1302 collapse__insert_entry(n);
1307 * reverse the map, sort on count.
1310 static struct rb_root output_hists;
1312 static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
1314 struct rb_node **p = &output_hists.rb_node;
1315 struct rb_node *parent = NULL;
1316 struct hist_entry *iter;
1318 if (callchain)
1319 callchain_param.sort(&he->sorted_chain, &he->callchain,
1320 min_callchain_hits, &callchain_param);
1322 while (*p != NULL) {
1323 parent = *p;
1324 iter = rb_entry(parent, struct hist_entry, rb_node);
1326 if (he->count > iter->count)
1327 p = &(*p)->rb_left;
1328 else
1329 p = &(*p)->rb_right;
1332 rb_link_node(&he->rb_node, parent, p);
1333 rb_insert_color(&he->rb_node, &output_hists);
1336 static void output__resort(u64 total_samples)
1338 struct rb_node *next;
1339 struct hist_entry *n;
1340 struct rb_root *tree = &hist;
1341 u64 min_callchain_hits;
1343 min_callchain_hits = total_samples * (callchain_param.min_percent / 100);
1345 if (sort__need_collapse)
1346 tree = &collapse_hists;
1348 next = rb_first(tree);
1350 while (next) {
1351 n = rb_entry(next, struct hist_entry, rb_node);
1352 next = rb_next(&n->rb_node);
1354 rb_erase(&n->rb_node, tree);
1355 output__insert_entry(n, min_callchain_hits);
1359 static size_t output__fprintf(FILE *fp, u64 total_samples)
1361 struct hist_entry *pos;
1362 struct sort_entry *se;
1363 struct rb_node *nd;
1364 size_t ret = 0;
1365 unsigned int width;
1366 char *col_width = col_width_list_str;
1368 fprintf(fp, "# Samples: %Ld\n", (u64)total_samples);
1369 fprintf(fp, "#\n");
1371 fprintf(fp, "# Overhead");
1372 if (show_nr_samples) {
1373 if (field_sep)
1374 fprintf(fp, "%cSamples", *field_sep);
1375 else
1376 fputs(" Samples ", fp);
1378 list_for_each_entry(se, &hist_entry__sort_list, list) {
1379 if (se->elide)
1380 continue;
1381 if (field_sep) {
1382 fprintf(fp, "%c%s", *field_sep, se->header);
1383 continue;
1385 width = strlen(se->header);
1386 if (se->width) {
1387 if (col_width_list_str) {
1388 if (col_width) {
1389 *se->width = atoi(col_width);
1390 col_width = strchr(col_width, ',');
1391 if (col_width)
1392 ++col_width;
1395 width = *se->width = max(*se->width, width);
1397 fprintf(fp, " %*s", width, se->header);
1399 fprintf(fp, "\n");
1401 if (field_sep)
1402 goto print_entries;
1404 fprintf(fp, "# ........");
1405 if (show_nr_samples)
1406 fprintf(fp, " ..........");
1407 list_for_each_entry(se, &hist_entry__sort_list, list) {
1408 unsigned int i;
1410 if (se->elide)
1411 continue;
1413 fprintf(fp, " ");
1414 if (se->width)
1415 width = *se->width;
1416 else
1417 width = strlen(se->header);
1418 for (i = 0; i < width; i++)
1419 fprintf(fp, ".");
1421 fprintf(fp, "\n");
1423 fprintf(fp, "#\n");
1425 print_entries:
1426 for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
1427 pos = rb_entry(nd, struct hist_entry, rb_node);
1428 ret += hist_entry__fprintf(fp, pos, total_samples);
1431 if (sort_order == default_sort_order &&
1432 parent_pattern == default_parent_pattern) {
1433 fprintf(fp, "#\n");
1434 fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
1435 fprintf(fp, "#\n");
1437 fprintf(fp, "\n");
1439 return ret;
1442 static void register_idle_thread(void)
1444 struct thread *thread = threads__findnew(0);
1446 if (thread == NULL ||
1447 thread__set_comm(thread, "[idle]")) {
1448 fprintf(stderr, "problem inserting idle task.\n");
1449 exit(-1);
1453 static unsigned long total = 0,
1454 total_mmap = 0,
1455 total_comm = 0,
1456 total_fork = 0,
1457 total_unknown = 0,
1458 total_lost = 0;
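/*
 * Sanity check: the number of callchain entries claimed by the sample
 * must fit inside the event record, otherwise the chain is rejected.
 */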
1460 static int validate_chain(struct ip_callchain *chain, event_t *event)
1462 unsigned int chain_size;
1464 chain_size = event->header.size;
1465 chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
1467 if (chain->nr*sizeof(u64) > chain_size)
1468 return -1;
1470 return 0;
1473 static int
1474 process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1476 char level;
1477 int show = 0;
1478 struct dso *dso = NULL;
1479 struct thread *thread = threads__findnew(event->ip.pid);
1480 u64 ip = event->ip.ip;
1481 u64 period = 1;
1482 struct map *map = NULL;
1483 void *more_data = event->ip.__more_data;
1484 struct ip_callchain *chain = NULL;
1485 int cpumode;
1487 if (sample_type & PERF_SAMPLE_PERIOD) {
1488 period = *(u64 *)more_data;
1489 more_data += sizeof(u64);
1492 dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
1493 (void *)(offset + head),
1494 (void *)(long)(event->header.size),
1495 event->header.misc,
1496 event->ip.pid,
1497 (void *)(long)ip,
1498 (long long)period);
1500 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
1501 unsigned int i;
1503 chain = (void *)more_data;
1505 dprintf("... chain: nr:%Lu\n", chain->nr);
1507 if (validate_chain(chain, event) < 0) {
1508 eprintf("call-chain problem with event, skipping it.\n");
1509 return 0;
1512 if (dump_trace) {
1513 for (i = 0; i < chain->nr; i++)
1514 dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);
1518 dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
1520 if (thread == NULL) {
1521 eprintf("problem processing %d event, skipping it.\n",
1522 event->header.type);
1523 return -1;
1526 if (comm_list && !strlist__has_entry(comm_list, thread->comm))
1527 return 0;
1529 cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
1531 if (cpumode == PERF_EVENT_MISC_KERNEL) {
1532 show = SHOW_KERNEL;
1533 level = 'k';
1535 dso = kernel_dso;
1537 dprintf(" ...... dso: %s\n", dso->name);
1539 } else if (cpumode == PERF_EVENT_MISC_USER) {
1541 show = SHOW_USER;
1542 level = '.';
1544 } else {
1545 show = SHOW_HV;
1546 level = 'H';
1548 dso = hypervisor_dso;
1550 dprintf(" ...... dso: [hypervisor]\n");
1553 if (show & show_mask) {
1554 struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
1556 if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name))
1557 return 0;
1559 if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
1560 return 0;
1562 if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
1563 eprintf("problem incrementing symbol count, skipping event\n");
1564 return -1;
1567 total += period;
1569 return 0;
1572 static int
1573 process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
1575 struct thread *thread = threads__findnew(event->mmap.pid);
1576 struct map *map = map__new(&event->mmap);
1578 dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
1579 (void *)(offset + head),
1580 (void *)(long)(event->header.size),
1581 event->mmap.pid,
1582 (void *)(long)event->mmap.start,
1583 (void *)(long)event->mmap.len,
1584 (void *)(long)event->mmap.pgoff,
1585 event->mmap.filename);
1587 if (thread == NULL || map == NULL) {
1588 dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
1589 return 0;
1592 thread__insert_map(thread, map);
1593 total_mmap++;
1595 return 0;
1598 static int
1599 process_comm_event(event_t *event, unsigned long offset, unsigned long head)
1601 struct thread *thread = threads__findnew(event->comm.pid);
1603 dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
1604 (void *)(offset + head),
1605 (void *)(long)(event->header.size),
1606 event->comm.comm, event->comm.pid);
1608 if (thread == NULL ||
1609 thread__set_comm(thread, event->comm.comm)) {
1610 dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
1611 return -1;
1613 total_comm++;
1615 return 0;
1618 static int
1619 process_fork_event(event_t *event, unsigned long offset, unsigned long head)
1621 struct thread *thread = threads__findnew(event->fork.pid);
1622 struct thread *parent = threads__findnew(event->fork.ppid);
1624 dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
1625 (void *)(offset + head),
1626 (void *)(long)(event->header.size),
1627 event->fork.pid, event->fork.ppid);
1629 if (!thread || !parent || thread__fork(thread, parent)) {
1630 dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
1631 return -1;
1633 total_fork++;
1635 return 0;
1638 static int
1639 process_period_event(event_t *event, unsigned long offset, unsigned long head)
1641 dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
1642 (void *)(offset + head),
1643 (void *)(long)(event->header.size),
1644 event->period.time,
1645 event->period.id,
1646 event->period.sample_period);
1648 return 0;
1651 static int
1652 process_lost_event(event_t *event, unsigned long offset, unsigned long head)
1654 dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
1655 (void *)(offset + head),
1656 (void *)(long)(event->header.size),
1657 event->lost.id,
1658 event->lost.lost);
1660 total_lost += event->lost.lost;
1662 return 0;
1665 static void trace_event(event_t *event)
1667 unsigned char *raw_event = (void *)event;
1668 char *color = PERF_COLOR_BLUE;
1669 int i, j;
1671 if (!dump_trace)
1672 return;
1674 dprintf(".");
1675 cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);
1677 for (i = 0; i < event->header.size; i++) {
1678 if ((i & 15) == 0) {
1679 dprintf(".");
1680 cdprintf(" %04x: ", i);
1683 cdprintf(" %02x", raw_event[i]);
1685 if (((i & 15) == 15) || i == event->header.size-1) {
1686 cdprintf(" ");
1687 for (j = 0; j < 15-(i & 15); j++)
1688 cdprintf(" ");
1689 for (j = 0; j < (i & 15); j++) {
1690 if (isprint(raw_event[i-15+j]))
1691 cdprintf("%c", raw_event[i-15+j]);
1692 else
1693 cdprintf(".");
1695 cdprintf("\n");
1698 dprintf(".\n");
1701 static int
1702 process_read_event(event_t *event, unsigned long offset, unsigned long head)
1704 dprintf("%p [%p]: PERF_EVENT_READ: %d %d %Lu\n",
1705 (void *)(offset + head),
1706 (void *)(long)(event->header.size),
1707 event->read.pid,
1708 event->read.tid,
1709 event->read.value);
1711 return 0;
1714 static int
1715 process_event(event_t *event, unsigned long offset, unsigned long head)
1717 trace_event(event);
1719 switch (event->header.type) {
1720 case PERF_EVENT_SAMPLE:
1721 return process_sample_event(event, offset, head);
1723 case PERF_EVENT_MMAP:
1724 return process_mmap_event(event, offset, head);
1726 case PERF_EVENT_COMM:
1727 return process_comm_event(event, offset, head);
1729 case PERF_EVENT_FORK:
1730 return process_fork_event(event, offset, head);
1732 case PERF_EVENT_PERIOD:
1733 return process_period_event(event, offset, head);
1735 case PERF_EVENT_LOST:
1736 return process_lost_event(event, offset, head);
1738 case PERF_EVENT_READ:
1739 return process_read_event(event, offset, head);
1742 * We don't process them right now, but they are fine:
1745 case PERF_EVENT_THROTTLE:
1746 case PERF_EVENT_UNTHROTTLE:
1747 return 0;
1749 default:
1750 return -1;
1753 return 0;
1756 static struct perf_header *header;
1758 static u64 perf_header__sample_type(void)
1760 u64 sample_type = 0;
1761 int i;
1763 for (i = 0; i < header->attrs; i++) {
1764 struct perf_header_attr *attr = header->attr[i];
1766 if (!sample_type)
1767 sample_type = attr->attr.sample_type;
1768 else if (sample_type != attr->attr.sample_type)
1769 die("non matching sample_type");
1772 return sample_type;
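/*
 * Main entry point of 'perf report': mmap a window of perf.data, walk
 * the event records (re-mmapping as the window is crossed), feed them to
 * process_event(), then collapse and re-sort the histogram for printing.
 */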
1775 static int __cmd_report(void)
1777 int ret, rc = EXIT_FAILURE;
1778 unsigned long offset = 0;
1779 unsigned long head, shift;
1780 struct stat stat;
1781 event_t *event;
1782 uint32_t size;
1783 char *buf;
1785 register_idle_thread();
1787 input = open(input_name, O_RDONLY);
1788 if (input < 0) {
1789 fprintf(stderr, " failed to open file: %s", input_name);
1790 if (!strcmp(input_name, "perf.data"))
1791 fprintf(stderr, " (try 'perf record' first)");
1792 fprintf(stderr, "\n");
1793 exit(-1);
1796 ret = fstat(input, &stat);
1797 if (ret < 0) {
1798 perror("failed to stat file");
1799 exit(-1);
1802 if (!stat.st_size) {
1803 fprintf(stderr, "zero-sized file, nothing to do!\n");
1804 exit(0);
1807 header = perf_header__read(input);
1808 head = header->data_offset;
1810 sample_type = perf_header__sample_type();
1812 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
1813 if (sort__has_parent) {
1814 fprintf(stderr, "selected --sort parent, but no"
1815 " callchain data. Did you call"
1816 " perf record without -g?\n");
1817 exit(-1);
1819 if (callchain) {
1820 fprintf(stderr, "selected -c but no callchain data."
1821 " Did you call perf record without"
1822 " -g?\n");
1823 exit(-1);
1827 if (load_kernel() < 0) {
1828 perror("failed to load kernel symbols");
1829 return EXIT_FAILURE;
1832 if (!full_paths) {
1833 if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
1834 perror("failed to get the current directory");
1835 return EXIT_FAILURE;
1837 cwdlen = strlen(cwd);
1838 } else {
1839 cwd = NULL;
1840 cwdlen = 0;
1843 shift = page_size * (head / page_size);
1844 offset += shift;
1845 head -= shift;
1847 remap:
1848 buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
1849 MAP_SHARED, input, offset);
1850 if (buf == MAP_FAILED) {
1851 perror("failed to mmap file");
1852 exit(-1);
1855 more:
1856 event = (event_t *)(buf + head);
1858 size = event->header.size;
1859 if (!size)
1860 size = 8;
1862 if (head + event->header.size >= page_size * mmap_window) {
1863 int ret;
1865 shift = page_size * (head / page_size);
1867 ret = munmap(buf, page_size * mmap_window);
1868 assert(ret == 0);
1870 offset += shift;
1871 head -= shift;
1872 goto remap;
1875 size = event->header.size;
1877 dprintf("\n%p [%p]: event: %d\n",
1878 (void *)(offset + head),
1879 (void *)(long)event->header.size,
1880 event->header.type);
1882 if (!size || process_event(event, offset, head) < 0) {
1884 dprintf("%p [%p]: skipping unknown header type: %d\n",
1885 (void *)(offset + head),
1886 (void *)(long)(event->header.size),
1887 event->header.type);
1889 total_unknown++;
1892 * assume we lost track of the stream, check alignment, and
1893 * increment a single u64 in the hope of catching on again 'soon'.
1896 if (unlikely(head & 7))
1897 head &= ~7ULL;
1899 size = 8;
1902 head += size;
1904 if (offset + head >= header->data_offset + header->data_size)
1905 goto done;
1907 if (offset + head < (unsigned long)stat.st_size)
1908 goto more;
1910 done:
1911 rc = EXIT_SUCCESS;
1912 close(input);
1914 dprintf(" IP events: %10ld\n", total);
1915 dprintf(" mmap events: %10ld\n", total_mmap);
1916 dprintf(" comm events: %10ld\n", total_comm);
1917 dprintf(" fork events: %10ld\n", total_fork);
1918 dprintf(" lost events: %10ld\n", total_lost);
1919 dprintf(" unknown events: %10ld\n", total_unknown);
1921 if (dump_trace)
1922 return 0;
1924 if (verbose >= 3)
1925 threads__fprintf(stdout);
1927 if (verbose >= 2)
1928 dsos__fprintf(stdout);
1930 collapse__resort();
1931 output__resort(total);
1932 output__fprintf(stdout, total);
1934 return rc;
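/*
 * Parse the -g/--call-graph argument, "output_type,min_percent"
 * (e.g. "fractal,0.5"): pick the callchain output mode and the minimum
 * hit percentage below which call chains are not displayed.
 */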
1937 static int
1938 parse_callchain_opt(const struct option *opt __used, const char *arg,
1939 int unset __used)
1941 char *tok;
1942 char *endptr;
1944 callchain = 1;
1946 if (!arg)
1947 return 0;
1949 tok = strtok((char *)arg, ",");
1950 if (!tok)
1951 return -1;
1953 /* get the output mode */
1954 if (!strncmp(tok, "graph", strlen(arg)))
1955 callchain_param.mode = CHAIN_GRAPH_ABS;
1957 else if (!strncmp(tok, "flat", strlen(arg)))
1958 callchain_param.mode = CHAIN_FLAT;
1960 else if (!strncmp(tok, "fractal", strlen(arg)))
1961 callchain_param.mode = CHAIN_GRAPH_REL;
1963 else
1964 return -1;
1966 /* get the min percentage */
1967 tok = strtok(NULL, ",");
1968 if (!tok)
1969 goto setup;
1971 callchain_param.min_percent = strtod(tok, &endptr);
1972 if (tok == endptr)
1973 return -1;
1975 setup:
1976 if (register_callchain_param(&callchain_param) < 0) {
1977 fprintf(stderr, "Can't register callchain params\n");
1978 return -1;
1980 return 0;
1983 static const char * const report_usage[] = {
1984 "perf report [<options>] <command>",
1985 NULL
1988 static const struct option options[] = {
1989 OPT_STRING('i', "input", &input_name, "file",
1990 "input file name"),
1991 OPT_BOOLEAN('v', "verbose", &verbose,
1992 "be more verbose (show symbol address, etc)"),
1993 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1994 "dump raw trace in ASCII"),
1995 OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
1996 OPT_BOOLEAN('m', "modules", &modules,
1997 "load module symbols - WARNING: use only with -k and LIVE kernel"),
1998 OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
1999 "Show a column with the number of samples"),
2000 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
2001 "sort by key(s): pid, comm, dso, symbol, parent"),
2002 OPT_BOOLEAN('P', "full-paths", &full_paths,
2003 "Don't shorten the pathnames taking into account the cwd"),
2004 OPT_STRING('p', "parent", &parent_pattern, "regex",
2005 "regex filter to identify parent, see: '--sort parent'"),
2006 OPT_BOOLEAN('x', "exclude-other", &exclude_other,
2007 "Only display entries with parent-match"),
2008 OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent",
2009 "Display callchains using output_type and min percent threshold. "
2010 "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
2011 OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
2012 "only consider symbols in these dsos"),
2013 OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
2014 "only consider symbols in these comms"),
2015 OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
2016 "only consider these symbols"),
2017 OPT_STRING('w', "column-widths", &col_width_list_str,
2018 "width[,width...]",
2019 "don't try to adjust column width, use these fixed values"),
2020 OPT_STRING('t', "field-separator", &field_sep, "separator",
2021 "separator for columns, no spaces will be added between "
2022 "columns '.' is reserved."),
2023 OPT_END()
2026 static void setup_sorting(void)
2028 char *tmp, *tok, *str = strdup(sort_order);
2030 for (tok = strtok_r(str, ", ", &tmp);
2031 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2032 if (sort_dimension__add(tok) < 0) {
2033 error("Unknown --sort key: `%s'", tok);
2034 usage_with_options(report_usage, options);
2038 free(str);
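/*
 * Build a strlist from a comma-separated -d/-C/-S filter; when the list
 * has a single entry the matching column is elided and noted in the
 * report header instead.
 */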
2041 static void setup_list(struct strlist **list, const char *list_str,
2042 struct sort_entry *se, const char *list_name,
2043 FILE *fp)
2045 if (list_str) {
2046 *list = strlist__new(true, list_str);
2047 if (!*list) {
2048 fprintf(stderr, "problems parsing %s list\n",
2049 list_name);
2050 exit(129);
2052 if (strlist__nr_entries(*list) == 1) {
2053 fprintf(fp, "# %s: %s\n", list_name,
2054 strlist__entry(*list, 0)->s);
2055 se->elide = true;
2060 int cmd_report(int argc, const char **argv, const char *prefix __used)
2062 symbol__init();
2064 page_size = getpagesize();
2066 argc = parse_options(argc, argv, options, report_usage, 0);
2068 setup_sorting();
2070 if (parent_pattern != default_parent_pattern) {
2071 sort_dimension__add("parent");
2072 sort_parent.elide = 1;
2073 } else
2074 exclude_other = 0;
2077 * Any (unrecognized) arguments left?
2079 if (argc)
2080 usage_with_options(report_usage, options);
2082 setup_pager();
2084 setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout);
2085 setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout);
2086 setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout);
2088 if (field_sep && *field_sep == '.') {
2089 fputs("'.' is the only non valid --field-separator argument\n",
2090 stderr);
2091 exit(129);
2094 return __cmd_report();