perf report: Get rid of report__inc_stat()
tools/perf/util/hist.c (linux-2.6/btrfs-unstable.git)
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}
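
/*
 * Age a stat block: each decay pass keeps 7/8 of the accumulated period
 * and event count, so entries that stop receiving samples gradually
 * fade out of a live display such as perf top.
 */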
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
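
/*
 * Decay one entry and pull the lost period out of the hists totals;
 * returns true when the entry's period has dropped to zero and the
 * caller may remove it.
 */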
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			hists__delete_entry(hists, n);
		}
	}
}
void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}
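
/*
 * hist_entry_iter callbacks: each sample flavour (mem, branch, normal,
 * cumulative) supplies prepare/add_single/next/add_next/finish hooks,
 * wired together in the hist_iter_ops tables further down.
 */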
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight and this is indirectly achieved by
	 * passing period=weight here and in the he_stat__add_period()
	 * function.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * cumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};
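
/*
 * Drive one sample through the selected iterator ops: resolve the
 * callchain, prepare, add the single (self) entry, then walk the
 * remaining entries (branch pairs or callchain nodes), invoking
 * add_entry_cb for each hist entry created along the way.
 */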
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free_callchain(he->callchain);
	free(he);
}

/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
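
/*
 * Incoming entries land in one of the two rb trees in
 * hists->entries_in_array; rotating them under hists->lock lets the
 * collapse pass drain one tree while new samples keep being inserted
 * into the other.
 */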
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
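
/*
 * Output stage: entries move from the (collapsed) input tree into
 * hists->entries, ordered by the configured sort keys, with callchains
 * sorted and pruned against min_callchain_hits.
 */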
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
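
/*
 * Clear one filter bit on the entry; only when no filter bits remain
 * does it re-enter the non-filtered stats and column width accounting.
 */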
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
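
/* nr_events[0] doubles as the total count across all record types. */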
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
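
/*
 * Insert a zero-stat copy of 'pair' into 'hists'; hists__link() below
 * uses this to give unmatched entries a placeholder row in the leader.
 */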
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}