/*
 * builtin-annotate.c
 *
 * Builtin annotate command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include "util/list.h"
#include "util/cache.h"
#include "util/rbtree.h"
#include "util/symbol.h"
#include "util/string.h"

#include "perf.h"

#include "util/parse-options.h"
#include "util/parse-events.h"

#define SHOW_KERNEL	1
#define SHOW_USER	2
#define SHOW_HV		4

#define MIN_GREEN	0.5
#define MIN_RED		5.0

static char const *input_name = "perf.data";
static char *vmlinux = "vmlinux";

static char default_sort_order[] = "comm,symbol";
static char *sort_order = default_sort_order;

static int input;
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int dump_trace = 0;
#define dprintf(x...)	do { if (dump_trace) printf(x); } while (0)

static int verbose;

static int print_line;

static unsigned long page_size;
static unsigned long mmap_window = 32;
struct ip_event {
        struct perf_event_header header;
        u64 ip;
        u32 pid, tid;
};

struct mmap_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        char filename[PATH_MAX];
};

struct comm_event {
        struct perf_event_header header;
        u32 pid, tid;
        char comm[16];
};

struct fork_event {
        struct perf_event_header header;
        u32 pid, ppid;
};

struct period_event {
        struct perf_event_header header;
        u64 time;
        u64 id;
        u64 sample_period;
};

typedef union event_union {
        struct perf_event_header header;
        struct ip_event ip;
        struct mmap_event mmap;
        struct comm_event comm;
        struct fork_event fork;
        struct period_event period;
} event_t;

struct sym_ext {
        struct rb_node node;
        double percent;
        char *path;
};

static LIST_HEAD(dsos);
static struct dso *kernel_dso;
static struct dso *vdso;
static void dsos__add(struct dso *dso)
{
        list_add_tail(&dso->node, &dsos);
}

static struct dso *dsos__find(const char *name)
{
        struct dso *pos;

        list_for_each_entry(pos, &dsos, node)
                if (strcmp(pos->name, name) == 0)
                        return pos;
        return NULL;
}

static struct dso *dsos__findnew(const char *name)
{
        struct dso *dso = dsos__find(name);
        int nr;

        if (dso)
                return dso;

        dso = dso__new(name, 0);
        if (!dso)
                goto out_delete_dso;

        nr = dso__load(dso, NULL, verbose);
        if (nr < 0) {
                if (verbose)
                        fprintf(stderr, "Failed to open: %s\n", name);
                goto out_delete_dso;
        }
        if (!nr && verbose) {
                fprintf(stderr,
                        "No symbols found in: %s, maybe install a debug package?\n",
                        name);
        }

        dsos__add(dso);

        return dso;

out_delete_dso:
        dso__delete(dso);
        return NULL;
}
static void dsos__fprintf(FILE *fp)
{
        struct dso *pos;

        list_for_each_entry(pos, &dsos, node)
                dso__fprintf(pos, fp);
}

static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
{
        return dso__find_symbol(kernel_dso, ip);
}

static int load_kernel(void)
{
        int err;

        kernel_dso = dso__new("[kernel]", 0);
        if (!kernel_dso)
                return -1;

        err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
        if (err) {
                dso__delete(kernel_dso);
                kernel_dso = NULL;
        } else
                dsos__add(kernel_dso);

        vdso = dso__new("[vdso]", 0);
        if (!vdso)
                return -1;

        vdso->find_symbol = vdso__find_symbol;

        dsos__add(vdso);

        return err;
}
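
/*
 * A map describes one mmap'ed region of a thread's address space;
 * map_ip() translates a sampled virtual address into a dso-relative
 * address (the vdso variant is an identity mapping).
 */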
struct map {
        struct list_head node;
        u64 start;
        u64 end;
        u64 pgoff;
        u64 (*map_ip)(struct map *, u64);
        struct dso *dso;
};

static u64 map__map_ip(struct map *map, u64 ip)
{
        return ip - map->start + map->pgoff;
}

static u64 vdso__map_ip(struct map *map, u64 ip)
{
        return ip;
}
static struct map *map__new(struct mmap_event *event)
{
        struct map *self = malloc(sizeof(*self));

        if (self != NULL) {
                const char *filename = event->filename;

                self->start = event->start;
                self->end   = event->start + event->len;
                self->pgoff = event->pgoff;

                self->dso = dsos__findnew(filename);
                if (self->dso == NULL)
                        goto out_delete;

                if (self->dso == vdso)
                        self->map_ip = vdso__map_ip;
                else
                        self->map_ip = map__map_ip;
        }
        return self;
out_delete:
        free(self);
        return NULL;
}

static struct map *map__clone(struct map *self)
{
        struct map *map = malloc(sizeof(*self));

        if (!map)
                return NULL;

        memcpy(map, self, sizeof(*self));

        return map;
}

static int map__overlap(struct map *l, struct map *r)
{
        if (l->start > r->start) {
                struct map *t = l;
                l = r;
                r = t;
        }

        if (l->end > r->start)
                return 1;

        return 0;
}

static size_t map__fprintf(struct map *self, FILE *fp)
{
        return fprintf(fp, " %Lx-%Lx %Lx %s\n",
                       self->start, self->end, self->pgoff, self->dso->name);
}
struct thread {
        struct rb_node rb_node;
        struct list_head maps;
        pid_t pid;
        char *comm;
};

static struct thread *thread__new(pid_t pid)
{
        struct thread *self = malloc(sizeof(*self));

        if (self != NULL) {
                self->pid = pid;
                self->comm = malloc(32);
                if (self->comm)
                        snprintf(self->comm, 32, ":%d", self->pid);
                INIT_LIST_HEAD(&self->maps);
        }

        return self;
}

static int thread__set_comm(struct thread *self, const char *comm)
{
        if (self->comm)
                free(self->comm);
        self->comm = strdup(comm);
        return self->comm ? 0 : -ENOMEM;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
        struct map *pos;
        size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);

        list_for_each_entry(pos, &self->maps, node)
                ret += map__fprintf(pos, fp);

        return ret;
}
static struct rb_root threads;
static struct thread *last_match;

static struct thread *threads__findnew(pid_t pid)
{
        struct rb_node **p = &threads.rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /*
         * Front-end cache - PID lookups come in blocks,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
        if (last_match && last_match->pid == pid)
                return last_match;

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->pid == pid) {
                        last_match = th;
                        return th;
                }

                if (pid < th->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        th = thread__new(pid);
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &threads);
                last_match = th;
        }

        return th;
}
static void thread__insert_map(struct thread *self, struct map *map)
{
        struct map *pos, *tmp;

        list_for_each_entry_safe(pos, tmp, &self->maps, node) {
                if (map__overlap(pos, map)) {
                        list_del_init(&pos->node);
                        /* XXX leaks dsos */
                        free(pos);
                }
        }

        list_add_tail(&map->node, &self->maps);
}

static int thread__fork(struct thread *self, struct thread *parent)
{
        struct map *map;

        if (self->comm)
                free(self->comm);
        self->comm = strdup(parent->comm);
        if (!self->comm)
                return -ENOMEM;

        list_for_each_entry(map, &parent->maps, node) {
                struct map *new = map__clone(map);
                if (!new)
                        return -ENOMEM;
                thread__insert_map(self, new);
        }

        return 0;
}

static struct map *thread__find_map(struct thread *self, u64 ip)
{
        struct map *pos;

        if (self == NULL)
                return NULL;

        list_for_each_entry(pos, &self->maps, node)
                if (ip >= pos->start && ip <= pos->end)
                        return pos;

        return NULL;
}

static size_t threads__fprintf(FILE *fp)
{
        size_t ret = 0;
        struct rb_node *nd;

        for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
                struct thread *pos = rb_entry(nd, struct thread, rb_node);

                ret += thread__fprintf(pos, fp);
        }

        return ret;
}
/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

struct hist_entry {
        struct rb_node rb_node;

        struct thread *thread;
        struct map *map;
        struct dso *dso;
        struct symbol *sym;
        u64 ip;
        char level;

        uint32_t count;
};

/*
 * configurable sorting bits
 */

struct sort_entry {
        struct list_head list;

        char *header;

        int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
        int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
        size_t (*print)(FILE *fp, struct hist_entry *);
};
/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
        return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
        return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
        .header = "         Command:  Pid",
        .cmp    = sort__thread_cmp,
        .print  = sort__thread_print,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
        return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
        char *comm_l = left->thread->comm;
        char *comm_r = right->thread->comm;

        if (!comm_l || !comm_r) {
                if (!comm_l && !comm_r)
                        return 0;
                else if (!comm_l)
                        return -1;
                else
                        return 1;
        }

        return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
        return fprintf(fp, "%16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
        .header   = "         Command",
        .cmp      = sort__comm_cmp,
        .collapse = sort__comm_collapse,
        .print    = sort__comm_print,
};

/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct dso *dso_l = left->dso;
        struct dso *dso_r = right->dso;

        if (!dso_l || !dso_r) {
                if (!dso_l && !dso_r)
                        return 0;
                else if (!dso_l)
                        return -1;
                else
                        return 1;
        }

        return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
        if (self->dso)
                return fprintf(fp, "%-25s", self->dso->name);

        return fprintf(fp, "%016llx         ", (u64)self->ip);
}

static struct sort_entry sort_dso = {
        .header = "Shared Object            ",
        .cmp    = sort__dso_cmp,
        .print  = sort__dso_print,
};
/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
        u64 ip_l, ip_r;

        if (left->sym == right->sym)
                return 0;

        ip_l = left->sym ? left->sym->start : left->ip;
        ip_r = right->sym ? right->sym->start : right->ip;

        return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
        size_t ret = 0;

        if (verbose)
                ret += fprintf(fp, "%#018llx ", (u64)self->ip);

        if (self->sym) {
                ret += fprintf(fp, "[%c] %s",
                               self->dso == kernel_dso ? 'k' : '.', self->sym->name);
        } else {
                ret += fprintf(fp, "%#016llx", (u64)self->ip);
        }

        return ret;
}

static struct sort_entry sort_sym = {
        .header = "Symbol",
        .cmp    = sort__sym_cmp,
        .print  = sort__sym_print,
};
static int sort__need_collapse = 0;

struct sort_dimension {
        char *name;
        struct sort_entry *entry;
        int taken;
};

static struct sort_dimension sort_dimensions[] = {
        { .name = "pid",    .entry = &sort_thread, },
        { .name = "comm",   .entry = &sort_comm,   },
        { .name = "dso",    .entry = &sort_dso,    },
        { .name = "symbol", .entry = &sort_sym,    },
};

static LIST_HEAD(hist_entry__sort_list);

static int sort_dimension__add(char *tok)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
                struct sort_dimension *sd = &sort_dimensions[i];

                if (sd->taken)
                        continue;

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                if (sd->entry->collapse)
                        sort__need_collapse = 1;

                list_add_tail(&sd->entry->list, &hist_entry__sort_list);
                sd->taken = 1;

                return 0;
        }

        return -ESRCH;
}
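
/*
 * Compare two histogram entries by walking the configured --sort
 * keys in order; the first key that differs decides.
 */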
static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

static int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->collapse ?: se->cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}
/*
 * collect histogram counts
 */
static void hist_hit(struct hist_entry *he, u64 ip)
{
        unsigned int sym_size, offset;
        struct symbol *sym = he->sym;

        he->count++;

        if (!sym || !sym->hist)
                return;

        sym_size = sym->end - sym->start;
        offset = ip - sym->start;

        if (offset >= sym_size)
                return;

        sym->hist_sum++;
        sym->hist[offset]++;

        if (verbose >= 3)
                printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n",
                       (void *)(unsigned long)he->sym->start,
                       he->sym->name,
                       (void *)(unsigned long)ip, ip - he->sym->start,
                       sym->hist[offset]);
}
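
/*
 * Insert a sample into the histogram rbtree: if an entry that
 * compares equal under the sort keys already exists, bump its
 * count (and the per-symbol hit array via hist_hit()) instead.
 */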
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
                struct symbol *sym, u64 ip, char level)
{
        struct rb_node **p = &hist.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        struct hist_entry entry = {
                .thread = thread,
                .map    = map,
                .dso    = dso,
                .sym    = sym,
                .ip     = ip,
                .level  = level,
                .count  = 1,
        };
        int cmp;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node);

                cmp = hist_entry__cmp(&entry, he);

                if (!cmp) {
                        hist_hit(he, ip);

                        return 0;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = malloc(sizeof(*he));
        if (!he)
                return -ENOMEM;
        *he = entry;
        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, &hist);

        return 0;
}

static void hist_entry__free(struct hist_entry *he)
{
        free(he);
}
/*
 * collapse the histogram
 */

static struct rb_root collapse_hists;

static void collapse__insert_entry(struct hist_entry *he)
{
        struct rb_node **p = &collapse_hists.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        iter->count += he->count;
                        hist_entry__free(he);
                        return;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, &collapse_hists);
}

static void collapse__resort(void)
{
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        next = rb_first(&hist);
        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                rb_erase(&n->rb_node, &hist);
                collapse__insert_entry(n);
        }
}
/*
 * reverse the map, sort on count.
 */

static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he)
{
        struct rb_node **p = &output_hists.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (he->count > iter->count)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, &output_hists);
}

static void output__resort(void)
{
        struct rb_node *next;
        struct hist_entry *n;
        struct rb_root *tree = &hist;

        if (sort__need_collapse)
                tree = &collapse_hists;

        next = rb_first(tree);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                rb_erase(&n->rb_node, tree);
                output__insert_entry(n);
        }
}
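
/*
 * Samples for pid 0 (the idle task) get a synthetic thread entry
 * named "[idle]" so they are accounted like any other thread.
 */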
static void register_idle_thread(void)
{
        struct thread *thread = threads__findnew(0);

        if (thread == NULL ||
            thread__set_comm(thread, "[idle]")) {
                fprintf(stderr, "problem inserting idle task.\n");
                exit(-1);
        }
}

static unsigned long total = 0,
                     total_mmap = 0,
                     total_comm = 0,
                     total_fork = 0,
                     total_unknown = 0;
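
/*
 * One sample (overflow) event: resolve the ip to a map, dso and
 * symbol depending on whether it hit kernel, user or hypervisor
 * context, then feed it into the histogram.
 */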
static int
process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
{
        char level;
        int show = 0;
        struct dso *dso = NULL;
        struct thread *thread = threads__findnew(event->ip.pid);
        u64 ip = event->ip.ip;
        struct map *map = NULL;

        dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
                (void *)(offset + head),
                (void *)(long)(event->header.size),
                event->header.misc,
                event->ip.pid,
                (void *)(long)ip);

        if (thread == NULL) {
                fprintf(stderr, "problem processing %d event, skipping it.\n",
                        event->header.type);
                return -1;
        }

        dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);

        if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
                show = SHOW_KERNEL;
                level = 'k';

                dso = kernel_dso;

                dprintf(" ...... dso: %s\n", dso->name);

        } else if (event->header.misc & PERF_EVENT_MISC_USER) {

                show = SHOW_USER;
                level = '.';

                map = thread__find_map(thread, ip);
                if (map != NULL) {
                        ip = map->map_ip(map, ip);
                        dso = map->dso;
                } else {
                        /*
                         * If this is outside of all known maps,
                         * and is a negative address, try to look it
                         * up in the kernel dso, as it might be a
                         * vsyscall (which executes in user-mode):
                         */
                        if ((long long)ip < 0)
                                dso = kernel_dso;
                }

                dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");

        } else {
                show = SHOW_HV;
                level = 'H';
                dprintf(" ...... dso: [hypervisor]\n");
        }

        if (show & show_mask) {
                struct symbol *sym = NULL;

                if (dso)
                        sym = dso->find_symbol(dso, ip);

                if (hist_entry__add(thread, map, dso, sym, ip, level)) {
                        fprintf(stderr,
                                "problem incrementing symbol count, skipping event\n");
                        return -1;
                }
        }
        total++;

        return 0;
}
static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
        struct thread *thread = threads__findnew(event->mmap.pid);
        struct map *map = map__new(&event->mmap);

        dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
                (void *)(offset + head),
                (void *)(long)(event->header.size),
                event->mmap.pid,
                (void *)(long)event->mmap.start,
                (void *)(long)event->mmap.len,
                (void *)(long)event->mmap.pgoff,
                event->mmap.filename);

        if (thread == NULL || map == NULL) {
                dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
                return 0;
        }

        thread__insert_map(thread, map);
        total_mmap++;

        return 0;
}

static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
        struct thread *thread = threads__findnew(event->comm.pid);

        dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
                (void *)(offset + head),
                (void *)(long)(event->header.size),
                event->comm.comm, event->comm.pid);

        if (thread == NULL ||
            thread__set_comm(thread, event->comm.comm)) {
                dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
                return -1;
        }
        total_comm++;

        return 0;
}

static int
process_fork_event(event_t *event, unsigned long offset, unsigned long head)
{
        struct thread *thread = threads__findnew(event->fork.pid);
        struct thread *parent = threads__findnew(event->fork.ppid);

        dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
                (void *)(offset + head),
                (void *)(long)(event->header.size),
                event->fork.pid, event->fork.ppid);

        if (!thread || !parent || thread__fork(thread, parent)) {
                dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
                return -1;
        }
        total_fork++;

        return 0;
}
static int
process_period_event(event_t *event, unsigned long offset, unsigned long head)
{
        dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
                (void *)(offset + head),
                (void *)(long)(event->header.size),
                event->period.time,
                event->period.id,
                event->period.sample_period);

        return 0;
}

static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
        if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
                return process_overflow_event(event, offset, head);

        switch (event->header.type) {
        case PERF_EVENT_MMAP:
                return process_mmap_event(event, offset, head);

        case PERF_EVENT_COMM:
                return process_comm_event(event, offset, head);

        case PERF_EVENT_FORK:
                return process_fork_event(event, offset, head);

        case PERF_EVENT_PERIOD:
                return process_period_event(event, offset, head);

        /*
         * We don't process them right now but they are fine:
         */
        case PERF_EVENT_THROTTLE:
        case PERF_EVENT_UNTHROTTLE:
                return 0;

        default:
                return -1;
        }

        return 0;
}
static char *get_color(double percent)
{
        char *color = PERF_COLOR_NORMAL;

        /*
         * We color high-overhead entries in red, mid-overhead
         * entries in green - and keep the low overhead places
         * normal:
         */
        if (percent >= MIN_RED)
                color = PERF_COLOR_RED;
        else {
                if (percent > MIN_GREEN)
                        color = PERF_COLOR_GREEN;
        }
        return color;
}
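
/*
 * Parse one line of `objdump -dS` output: lines starting with a hex
 * address followed by ':' are instructions and get a colored
 * percentage column; everything else (source lines) is echoed with
 * an empty percentage column.
 */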
static int
parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
{
        char *line = NULL, *tmp, *tmp2;
        static const char *prev_line;
        static const char *prev_color;
        unsigned int offset;
        size_t line_len;
        u64 line_ip;
        int ret;
        char *c;

        if (getline(&line, &line_len, file) < 0)
                return -1;
        if (!line)
                return -1;

        c = strchr(line, '\n');
        if (c)
                *c = 0;

        line_ip = -1;
        offset = 0;
        ret = -2;

        /*
         * Strip leading spaces:
         */
        tmp = line;
        while (*tmp) {
                if (*tmp != ' ')
                        break;
                tmp++;
        }

        if (*tmp) {
                /*
                 * Parse hex addresses followed by ':'
                 */
                line_ip = strtoull(tmp, &tmp2, 16);
                if (*tmp2 != ':')
                        line_ip = -1;
        }

        if (line_ip != -1) {
                const char *path = NULL;
                unsigned int hits = 0;
                double percent = 0.0;
                char *color;
                struct sym_ext *sym_ext = sym->priv;

                offset = line_ip - start;
                if (offset < len)
                        hits = sym->hist[offset];

                if (offset < len && sym_ext) {
                        path = sym_ext[offset].path;
                        percent = sym_ext[offset].percent;
                } else if (sym->hist_sum)
                        percent = 100.0 * hits / sym->hist_sum;

                color = get_color(percent);

                /*
                 * Also color the filename and line if needed, with
                 * the same color as the percentage. Don't print it
                 * twice for closely colored ips with the same filename:line.
                 */
                if (path) {
                        if (!prev_line || strcmp(prev_line, path)
                                       || color != prev_color) {
                                color_fprintf(stdout, color, " %s", path);
                                prev_line = path;
                                prev_color = color;
                        }
                }

                color_fprintf(stdout, color, " %7.2f", percent);
                printf(" : ");
                color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", line);
        } else {
                if (!*line)
                        printf(" :\n");
                else
                        printf(" : %s\n", line);
        }

        return 0;
}
static struct rb_root root_sym_ext;

static void insert_source_line(struct sym_ext *sym_ext)
{
        struct sym_ext *iter;
        struct rb_node **p = &root_sym_ext.rb_node;
        struct rb_node *parent = NULL;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct sym_ext, node);

                if (sym_ext->percent > iter->percent)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&sym_ext->node, parent, p);
        rb_insert_color(&sym_ext->node, &root_sym_ext);
}

static void free_source_line(struct symbol *sym, int len)
{
        struct sym_ext *sym_ext = sym->priv;
        int i;

        if (!sym_ext)
                return;

        for (i = 0; i < len; i++)
                free(sym_ext[i].path);
        free(sym_ext);

        sym->priv = NULL;
        root_sym_ext = RB_ROOT;
}
/* Get the filename:line for the colored entries */
static void
get_source_line(struct symbol *sym, u64 start, int len, char *filename)
{
        int i;
        char cmd[PATH_MAX * 2];
        struct sym_ext *sym_ext;

        if (!sym->hist_sum)
                return;

        sym->priv = calloc(len, sizeof(struct sym_ext));
        if (!sym->priv)
                return;

        sym_ext = sym->priv;

        for (i = 0; i < len; i++) {
                char *path = NULL;
                size_t line_len;
                u64 offset;
                FILE *fp;

                sym_ext[i].percent = 100.0 * sym->hist[i] / sym->hist_sum;
                if (sym_ext[i].percent <= 0.5)
                        continue;

                offset = start + i;
                sprintf(cmd, "addr2line -e %s %016llx", filename, offset);
                fp = popen(cmd, "r");
                if (!fp)
                        continue;

                if (getline(&path, &line_len, fp) < 0 || !line_len)
                        goto next;

                sym_ext[i].path = malloc(sizeof(char) * line_len + 1);
                if (!sym_ext[i].path)
                        goto next;

                strcpy(sym_ext[i].path, path);
                insert_source_line(&sym_ext[i]);

        next:
                free(path);     /* buffer allocated by getline() */
                pclose(fp);
        }
}
static void print_summary(char *filename)
{
        struct sym_ext *sym_ext;
        struct rb_node *node;

        printf("\nSorted summary for file %s\n", filename);
        printf("----------------------------------------------\n\n");

        if (RB_EMPTY_ROOT(&root_sym_ext)) {
                printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
                return;
        }

        node = rb_first(&root_sym_ext);
        while (node) {
                double percent;
                char *color;
                char *path;

                sym_ext = rb_entry(node, struct sym_ext, node);
                percent = sym_ext->percent;
                color = get_color(percent);
                path = sym_ext->path;

                color_fprintf(stdout, color, " %7.2f %s", percent, path);
                node = rb_next(node);
        }
}
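
/*
 * Annotate one symbol: run objdump over its address range and pipe
 * the disassembly through parse_line(), optionally preceded by the
 * per-source-line summary.
 */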
static void annotate_sym(struct dso *dso, struct symbol *sym)
{
        char *filename = dso->name;
        u64 start, end, len;
        char command[PATH_MAX * 2];
        FILE *file;

        if (!filename)
                return;
        if (dso == kernel_dso)
                filename = vmlinux;

        start = sym->obj_start;
        if (!start)
                start = sym->start;

        end = start + sym->end - sym->start + 1;
        len = sym->end - sym->start;

        if (print_line) {
                get_source_line(sym, start, len, filename);
                print_summary(filename);
        }

        printf("\n\n------------------------------------------------\n");
        printf(" Percent | Source code & Disassembly of %s\n", filename);
        printf("------------------------------------------------\n");

        if (verbose >= 2)
                printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name);

        sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s",
                (u64)start, (u64)end, filename);

        if (verbose >= 3)
                printf("doing: %s\n", command);

        file = popen(command, "r");
        if (!file)
                return;

        while (!feof(file)) {
                if (parse_line(file, sym, start, len) < 0)
                        break;
        }

        pclose(file);
        if (print_line)
                free_source_line(sym, len);
}
static void find_annotations(void)
{
        struct rb_node *nd;
        struct dso *dso;
        int count = 0;

        list_for_each_entry(dso, &dsos, node) {

                for (nd = rb_first(&dso->syms); nd; nd = rb_next(nd)) {
                        struct symbol *sym = rb_entry(nd, struct symbol, rb_node);

                        if (sym->hist) {
                                annotate_sym(dso, sym);
                                count++;
                        }
                }
        }

        if (!count)
                printf(" Error: symbol '%s' not present amongst the samples.\n",
                       sym_hist_filter);
}
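
/*
 * Main loop: mmap perf.data a window of mmap_window pages at a time,
 * dispatch every event, then collapse/resort the histogram and
 * annotate all symbols that collected hits.
 */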
static int __cmd_annotate(void)
{
        int ret, rc = EXIT_FAILURE;
        unsigned long offset = 0;
        unsigned long head = 0;
        struct stat stat;
        event_t *event;
        uint32_t size;
        char *buf;

        register_idle_thread();

        input = open(input_name, O_RDONLY);
        if (input < 0) {
                perror("failed to open file");
                exit(-1);
        }

        ret = fstat(input, &stat);
        if (ret < 0) {
                perror("failed to stat file");
                exit(-1);
        }

        if (!stat.st_size) {
                fprintf(stderr, "zero-sized file, nothing to do!\n");
                exit(0);
        }

        if (load_kernel() < 0) {
                perror("failed to load kernel symbols");
                return EXIT_FAILURE;
        }

remap:
        buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
                           MAP_SHARED, input, offset);
        if (buf == MAP_FAILED) {
                perror("failed to mmap file");
                exit(-1);
        }

more:
        event = (event_t *)(buf + head);

        size = event->header.size;
        if (!size)
                size = 8;

        if (head + event->header.size >= page_size * mmap_window) {
                unsigned long shift = page_size * (head / page_size);
                int ret;

                ret = munmap(buf, page_size * mmap_window);
                assert(ret == 0);

                offset += shift;
                head -= shift;
                goto remap;
        }

        size = event->header.size;

        dprintf("%p [%p]: event: %d\n",
                (void *)(offset + head),
                (void *)(long)event->header.size,
                event->header.type);

        if (!size || process_event(event, offset, head) < 0) {

                dprintf("%p [%p]: skipping unknown header type: %d\n",
                        (void *)(offset + head),
                        (void *)(long)(event->header.size),
                        event->header.type);

                total_unknown++;

                /*
                 * assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope to catch on again 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;

        if (offset + head < stat.st_size)
                goto more;

        rc = EXIT_SUCCESS;
        close(input);

        dprintf("      IP events: %10ld\n", total);
        dprintf("    mmap events: %10ld\n", total_mmap);
        dprintf("    comm events: %10ld\n", total_comm);
        dprintf("    fork events: %10ld\n", total_fork);
        dprintf(" unknown events: %10ld\n", total_unknown);

        if (dump_trace)
                return 0;

        if (verbose >= 3)
                threads__fprintf(stdout);

        if (verbose >= 2)
                dsos__fprintf(stdout);

        collapse__resort();
        output__resort();

        find_annotations();

        return rc;
}
static const char * const annotate_usage[] = {
        "perf annotate [<options>] <command>",
        NULL
};

static const struct option options[] = {
        OPT_STRING('i', "input", &input_name, "file",
                   "input file name"),
        OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
                   "symbol to annotate"),
        OPT_BOOLEAN('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
        OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
        OPT_BOOLEAN('l', "print-line", &print_line,
                    "print matching source lines (may be slow)"),
        OPT_END()
};
static void setup_sorting(void)
{
        char *tmp, *tok, *str = strdup(sort_order);

        for (tok = strtok_r(str, ", ", &tmp);
             tok; tok = strtok_r(NULL, ", ", &tmp)) {
                if (sort_dimension__add(tok) < 0) {
                        error("Unknown --sort key: `%s'", tok);
                        usage_with_options(annotate_usage, options);
                }
        }

        free(str);
}
int cmd_annotate(int argc, const char **argv, const char *prefix)
{
        symbol__init();

        page_size = getpagesize();

        argc = parse_options(argc, argv, options, annotate_usage, 0);

        setup_sorting();

        if (argc) {
                /*
                 * Special case: if there's an argument left then assume that
                 * it's a symbol filter:
                 */
                if (argc > 1)
                        usage_with_options(annotate_usage, options);

                sym_hist_filter = argv[0];
        }

        if (!sym_hist_filter)
                usage_with_options(annotate_usage, options);

        setup_pager();

        return __cmd_annotate();
}