perf hist: Make event__totals per hists
tools/perf/util/hist.c (repository: linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git)
blob 1614ad710046da85691c47819a410bdb5471c227
#include "util.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};
static void hist_entry__add_cpumode_count(struct hist_entry *self,
					  unsigned int cpumode, u64 count)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		self->count_sys += count;
		break;
	case PERF_RECORD_MISC_USER:
		self->count_us += count;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		self->count_guest_sys += count;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		self->count_guest_us += count;
		break;
	default:
		break;
	}
}
/*
 * histogram, sorted on item, collects counts
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
	struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

	if (self != NULL) {
		*self = *template;
		if (symbol_conf.use_callchain)
			callchain_init(self->callchain);
	}

	return self;
}
static void hists__inc_nr_entries(struct hists *self, struct hist_entry *entry)
{
	if (entry->ms.sym && self->max_sym_namelen < entry->ms.sym->namelen)
		self->max_sym_namelen = entry->ms.sym->namelen;
	++self->nr_entries;
}
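
/*
 * __hists__add_entry() is how samples get accounted: the resolved
 * addr_location is compared against existing entries with the configured
 * sort keys (hist_entry__cmp()), and either an existing entry has its
 * count bumped or a new one is allocated and linked into self->entries.
 * Minimal caller-side sketch (identifiers are illustrative only; real
 * callers live in the perf builtins):
 *
 *	struct hist_entry *he;
 *
 *	he = __hists__add_entry(hists, &al, sym_parent, count);
 *	if (he == NULL)
 *		return -ENOMEM;
 */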
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 count)
{
	struct rb_node **p = &self->entries.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.ip	= al->addr,
		.level	= al->level,
		.count	= count,
		.parent	= sym_parent,
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->count += count;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		return NULL;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &self->entries);
	hists__inc_nr_entries(self, he);
out:
	hist_entry__add_cpumode_count(he, al->cpumode, count);
	return he;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
/*
 * collapse the histogram
 */

static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->count += he->count;
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);
	return true;
}
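
/*
 * hists__collapse_resort() rebuilds the tree from scratch into a temporary
 * root: every entry is erased from self->entries and re-inserted with
 * collapse__insert_entry() above, so entries that compare equal under the
 * collapse keys get merged (the duplicate is freed and only the surviving
 * entry is accounted by hists__inc_nr_entries()).  Nothing to do unless one
 * of the sort dimensions asked for collapsing (sort__need_collapse).
 */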
void hists__collapse_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	tmp = RB_ROOT;
	next = rb_first(&self->entries);
	self->nr_entries = 0;
	self->max_sym_namelen = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		if (collapse__insert_entry(&tmp, n))
			hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}
/*
 * reverse the map, sort on count.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
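
/*
 * hists__output_resort() runs after collapsing: it moves every entry into a
 * new tree ordered by count, descending, which is the order the report is
 * printed in.  Callchains below min_callchain_hits are filtered out when
 * callchain_param.sort() runs.  Worked example with the defaults in this
 * file (callchain_param.min_percent = 0.5): for stats.total == 20000
 * samples,
 *
 *	min_callchain_hits = 20000 * (0.5 / 100) = 100
 *
 * so only chains with at least 100 hits survive.
 */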
void hists__output_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = self->stats.total * (callchain_param.min_percent / 100);

	tmp = RB_ROOT;
	next = rb_first(&self->entries);

	self->nr_entries = 0;
	self->max_sym_namelen = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
		hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}
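
/*
 * In the two graph printers above and below, depth_mask is a bitmask with
 * one bit per callchain depth level: bit i set means the vertical "|"
 * connector is still needed at level i, clear means that branch already
 * printed its last child and the column turns into blank space.
 */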
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int count,
				     u64 total_samples, int hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!count && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
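
/*
 * __callchain__fprintf_graph() prints one callchain node and recurses into
 * its children.  In CHAIN_GRAPH_REL mode percentages are relative to the
 * children_hit of the current node, otherwise to total_samples; whatever
 * was filtered out below the threshold is printed as the synthetic "[...]"
 * entry set up by init_rem_hits() above.
 */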
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != new_total) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}
static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}
static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
					    u64 total_samples, int left_margin)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, " %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* Falldown */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain, total_samples,
							left_margin);
		case CHAIN_NONE:
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return ret;
}
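
/*
 * hist_entry__snprintf() formats one report line into 's': the overhead
 * percentage (or the raw count when the total is 0), the optional
 * sys/us(/guest) cpu utilization split, the optional raw sample count, the
 * Delta and Displacement columns when comparing against pair_hists (as used
 * for perf diff style comparisons), and finally one column per configured
 * sort key.  hist_entry__fprintf() below is the convenience wrapper that
 * formats into a stack buffer and writes it to a FILE.
 */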
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *pair_hists, bool show_displacement,
			 long displacement, bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 count, total, count_sys, count_us, count_guest_sys, count_guest_us;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		count = self->pair ? self->pair->count : 0;
		total = pair_hists->stats.total;
		count_sys = self->pair ? self->pair->count_sys : 0;
		count_us = self->pair ? self->pair->count_us : 0;
		count_guest_sys = self->pair ? self->pair->count_guest_sys : 0;
		count_guest_us = self->pair ? self->pair->count_guest_us : 0;
	} else {
		count = self->count;
		total = session_total;
		count_sys = self->count_sys;
		count_us = self->count_us;
		count_guest_sys = self->count_guest_sys;
		count_guest_us = self->count_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (count * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
				       (count * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (count_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (count_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (count_guest_sys * 100.0) /
									total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (count_guest_us * 100.0) /
									total);
			}
		}
	} else
		ret = snprintf(s, size, sep ? "%lld" : "%12lld ", count);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%lld", *sep, count);
		else
			ret += snprintf(s + ret, size - ret, "%11lld", count);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (count * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->count * 100.0) / session_total;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       se->se_width ? *se->se_width : 0);
	}

	return ret;
}
int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
			bool show_displacement, long displacement, FILE *fp,
			u64 session_total)
{
	char bf[512];
	hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
			     show_displacement, displacement,
			     true, session_total);
	return fprintf(fp, "%s\n", bf);
}
static size_t hist_entry__fprintf_callchain(struct hist_entry *self, FILE *fp,
					    u64 session_total)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = se->se_width ? *se->se_width : 0;
		left_margin -= thread__comm_len(self->thread);
	}

	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
}
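
/*
 * hists__fprintf() prints the whole histogram: a header row ("Overhead", or
 * "Baseline" when a pair is given, plus the optional Samples, cpu
 * utilization, Delta and Displacement columns and the sort key headers), a
 * dotted separator line unless a field separator is in use, and then one
 * hist_entry__fprintf() line per entry in tree order, each optionally
 * followed by its callchain.
 */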
size_t hists__fprintf(struct hists *self, struct hists *pair,
		      bool show_displacement, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	char *col_width = symbol_conf.col_width_list_str;

	init_rem_hits();

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (se->se_width) {
			if (symbol_conf.col_width_list_str) {
				if (col_width) {
					*se->se_width = atoi(col_width);
					col_width = strchr(col_width, ',');
					if (col_width)
						++col_width;
				}
			}
			width = *se->se_width = max(*se->se_width, width);
		}
		fprintf(fp, " %*s", width, se->se_header);
	}
	fprintf(fp, "\n");

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		if (se->se_width)
			width = *se->se_width;
		else
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n#\n");

print_entries:
	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, pair, show_displacement,
					   displacement, fp, self->stats.total);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, fp, self->stats.total);

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(rem_sq_bracket);

	return ret;
}
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
};
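
/*
 * The filter routines below never remove entries from the tree: they only
 * set or clear the corresponding bit in hist_entry->filtered and recompute
 * nr_entries, stats.total and max_sym_namelen from the entries still
 * visible.  Caller-side sketch (illustrative only, real callers live
 * elsewhere in the perf tools):
 *
 *	hists__filter_by_dso(hists, dso);	// dso == NULL clears the filter
 *	hists__filter_by_thread(hists, thread);	// likewise for thread == NULL
 */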
void hists__filter_by_dso(struct hists *self, const struct dso *dso)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total = 0;
	self->max_sym_namelen = 0;

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

		h->filtered &= ~(1 << HIST_FILTER__DSO);
		if (!h->filtered) {
			++self->nr_entries;
			self->stats.total += h->count;
			if (h->ms.sym &&
			    self->max_sym_namelen < h->ms.sym->namelen)
				self->max_sym_namelen = h->ms.sym->namelen;
		}
	}
}
void hists__filter_by_thread(struct hists *self, const struct thread *thread)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total = 0;
	self->max_sym_namelen = 0;

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (thread != NULL && h->thread != thread) {
			h->filtered |= (1 << HIST_FILTER__THREAD);
			continue;
		}
		h->filtered &= ~(1 << HIST_FILTER__THREAD);
		if (!h->filtered) {
			++self->nr_entries;
			self->stats.total += h->count;
			if (h->ms.sym &&
			    self->max_sym_namelen < h->ms.sym->namelen)
				self->max_sym_namelen = h->ms.sym->namelen;
		}
	}
}
static int symbol__alloc_hist(struct symbol *self)
{
	struct sym_priv *priv = symbol__priv(self);
	const int size = (sizeof(*priv->hist) +
			  (self->end - self->start) * sizeof(u64));

	priv->hist = zalloc(size);
	return priv->hist == NULL ? -1 : 0;
}
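
/*
 * Annotation bookkeeping: each annotated symbol gets a lazily allocated
 * struct sym_hist (see symbol__alloc_hist() above) holding one u64 counter
 * per byte of the symbol.  hist_entry__inc_addr_samples() bumps the counter
 * at offset ip - sym->start and the total in h->sum, ignoring ips that fall
 * outside the symbol.
 */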
int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
{
	unsigned int sym_size, offset;
	struct symbol *sym = self->ms.sym;
	struct sym_priv *priv;
	struct sym_hist *h;

	if (!sym || !self->ms.map)
		return 0;

	priv = symbol__priv(sym);
	if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
		return -ENOMEM;

	sym_size = sym->end - sym->start;
	offset = ip - sym->start;

	pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));

	if (offset >= sym_size)
		return 0;

	h = priv->hist;
	h->sum++;
	h->ip[offset]++;

	pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
		  self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
	return 0;
}
static struct objdump_line *objdump_line__new(s64 offset, char *line)
{
	struct objdump_line *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->offset = offset;
		self->line = line;
	}

	return self;
}

void objdump_line__free(struct objdump_line *self)
{
	free(self->line);
	free(self);
}

static void objdump__add_line(struct list_head *head, struct objdump_line *line)
{
	list_add_tail(&line->node, head);
}

struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
					       struct objdump_line *pos)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}
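
/*
 * hist_entry__parse_objdump_line() reads one line of objdump -dS output and
 * queues it as a struct objdump_line.  Disassembly lines start with a hexa
 * address followed by ':', e.g. (illustrative objdump output):
 *
 *	ffffffff8102dabe:	55	push   %rbp
 *
 * those get offset = line_ip minus the objdump address of sym->start; every
 * other line (source interleaved by -S, labels, blanks) keeps offset == -1.
 */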
static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
					  struct list_head *head)
{
	struct symbol *sym = self->ms.sym;
	struct objdump_line *objdump_line;
	char *line = NULL, *tmp, *tmp2, *c;
	size_t line_len;
	s64 line_ip, offset = -1;

	if (getline(&line, &line_len, file) < 0)
		return -1;

	if (!line)
		return -1;

	while (line_len != 0 && isspace(line[line_len - 1]))
		line[--line_len] = '\0';

	c = strchr(line, '\n');
	if (c)
		*c = 0;

	line_ip = -1;

	/*
	 * Strip leading spaces:
	 */
	tmp = line;
	while (*tmp) {
		if (*tmp != ' ')
			break;
		tmp++;
	}

	if (*tmp) {
		/*
		 * Parse hexa addresses followed by ':'
		 */
		line_ip = strtoull(tmp, &tmp2, 16);
		if (*tmp2 != ':')
			line_ip = -1;
	}

	if (line_ip != -1) {
		u64 start = map__rip_2objdump(self->ms.map, sym->start);
		offset = line_ip - start;
	}

	objdump_line = objdump_line__new(offset, line);
	if (objdump_line == NULL) {
		free(line);
		return -1;
	}
	objdump__add_line(head, objdump_line);

	return 0;
}
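
/*
 * hist_entry__annotate() shells out to objdump to disassemble just the
 * symbol of interest and parses each output line with the helper above.
 * With the format string below the command ends up looking roughly like
 * (addresses and path are illustrative only):
 *
 *	objdump --start-address=0x0000000000400536 \
 *		--stop-address=0x0000000000400560 \
 *		-dS /usr/bin/foo | grep -v /usr/bin/foo | expand
 *
 * Kernel symbols are only annotated when a vmlinux image was found.
 */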
int hist_entry__annotate(struct hist_entry *self, struct list_head *head)
{
	struct symbol *sym = self->ms.sym;
	struct map *map = self->ms.map;
	struct dso *dso = map->dso;
	const char *filename = dso->long_name;
	char command[PATH_MAX * 2];
	FILE *file;
	u64 len;

	if (!filename)
		return -1;

	if (dso->origin == DSO__ORIG_KERNEL) {
		if (dso->annotate_warned)
			return 0;
		dso->annotate_warned = 1;
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path:\n", sym->name);
		vmlinux_path__fprintf(stderr);
		return -1;
	}

	pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
		 filename, sym->name, map->unmap_ip(map, sym->start),
		 map->unmap_ip(map, sym->end));

	len = sym->end - sym->start;

	pr_debug("annotating [%p] %30s : [%p] %30s\n",
		 dso, dso->long_name, sym, sym->name);

	snprintf(command, sizeof(command),
		 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s|expand",
		 map__rip_2objdump(map, sym->start),
		 map__rip_2objdump(map, sym->end),
		 filename, filename);

	pr_debug("Executing: %s\n", command);

	file = popen(command, "r");
	if (!file)
		return -1;

	while (!feof(file))
		if (hist_entry__parse_objdump_line(self, file, head) < 0)
			break;

	pclose(file);
	return 0;
}
void hists__inc_nr_events(struct hists *self, u32 type)
{
	++self->stats.nr_events[0];
	++self->stats.nr_events[type];
}
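
/*
 * stats.nr_events[] is indexed by PERF_RECORD_* type, with slot 0 doubling
 * as the running total over all types (hists__inc_nr_events() bumps both).
 * hists__fprintf_nr_events() below prints one line per known event name.
 */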
size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		if (!event__name[i])
			continue;
		ret += fprintf(fp, "%10s events: %10d\n",
			       event__name[i], self->stats.nr_events[i]);
	}

	return ret;
}