perf tools: Consolidate output field handling to hpp format routines

tools/perf/ui/hist.c

#include <math.h>
#include <linux/compiler.h>

#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"

/* hist period print (hpp) functions */

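/*
 * Call a print function and advance hpp->buf/hpp->size past what it
 * wrote, so successive fields append to the same buffer.  Evaluates to
 * the number of bytes printed.
 */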
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

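/*
 * Format one output field of a hist entry, either as a percentage of
 * the hists' total period or as a raw value, using the caller-supplied
 * field accessor and print function.  For event groups the leader's
 * value is printed first, then one value per group member taken from
 * the pair entries, zero-filling members without samples so the
 * columns stay aligned.  The optional callback brackets the output
 * (invoked with true before printing and false after).
 *
 * A typical field accessor, as generated by the __HPP_*_FN() macros
 * further down:
 *
 *	static u64 he_get_period(struct hist_entry *he)
 *	{
 *		return he->stat.period;
 *	}
 */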
int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
	       hpp_field_fn get_field, hpp_callback_fn callback,
	       const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret = 0;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (callback) {
		ret = callback(hpp, true);
		advance_hpp(hpp, ret);
	}

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret += hpp__call_print_fn(hpp, print_fn, fmt, percent);
	} else
		ret += hpp__call_print_fn(hpp, print_fn, fmt, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at the end which have
			 * no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, 0ULL);
			}
		}
	}

	if (callback) {
		int __ret = callback(hpp, false);

		advance_hpp(hpp, __ret);
		ret += __ret;
	}

	/*
	 * Restore the original buf and size as that is where the caller
	 * expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

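/*
 * field_cmp() orders two raw values; __hpp__sort() uses it to order
 * two hist entries.  The group leaders' values are compared first, and
 * only on a tie (with event grouping active) are the remaining group
 * members compared in group-index order; the final loop starts at
 * index 1 because slot 0 belongs to the leader, already compared.
 */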
static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

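/*
 * The macros below stamp out per-column header and width callbacks.
 * Both widen the column to one unit width per group member when event
 * grouping is in use.
 */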
#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width)		\
static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
			       struct perf_hpp *hpp,			\
			       struct perf_evsel *evsel)		\
{									\
	int len = _min_width;						\
									\
	if (symbol_conf.event_group)					\
		len = max(len, evsel->nr_members * _unit_width);	\
									\
	return scnprintf(hpp->buf, hpp->size, "%*s", len, _str);	\
}

#define __HPP_WIDTH_FN(_type, _min_width, _unit_width)			\
static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
			      struct perf_hpp *hpp __maybe_unused,	\
			      struct perf_evsel *evsel)			\
{									\
	int len = _min_width;						\
									\
	if (symbol_conf.event_group)					\
		len = max(len, evsel->nr_members * _unit_width);	\
									\
	return len;							\
}

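/*
 * Print helpers passed to __hpp__fmt(): the color variant extracts the
 * percentage from the varargs so value_color_snprintf() can choose a
 * color by its magnitude; the plain variant just forwards to
 * vsnprintf().  Both clamp the result to the remaining buffer space.
 */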
static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret;

	va_start(args, fmt);
	percent = va_arg(args, double);
	ret = value_color_snprintf(hpp->buf, hpp->size, fmt, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

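/*
 * Generators for the per-field functions: an accessor for the field in
 * he->stat, a color and/or plain entry printer, and a sort callback
 * wrapping __hpp__sort().
 */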
#define __HPP_COLOR_PERCENT_FN(_type, _field)				\
static u64 he_get_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return __hpp__fmt(hpp, he, he_get_##_field, NULL, " %6.2f%%",	\
			  hpp_color_scnprintf, true);			\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused,	\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \
	return __hpp__fmt(hpp, he, he_get_##_field, NULL, fmt,		\
			  hpp_entry_scnprintf, true);			\
}

#define __HPP_SORT_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)				\
static u64 he_get_raw_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused,	\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
	return __hpp__fmt(hpp, he, he_get_raw_##_field, NULL, fmt,	\
			  hpp_entry_scnprintf, false);			\
}

#define __HPP_SORT_RAW_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_raw_##_field);			\
}

#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width)	\
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width)			\
__HPP_WIDTH_FN(_type, _min_width, _unit_width)				\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)					\
__HPP_SORT_FN(_type, _field)

#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width)	\
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width)			\
__HPP_WIDTH_FN(_type, _min_width, _unit_width)				\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)

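/*
 * Instantiate the standard columns: overhead percentages (total,
 * sys/usr, guest sys/usr) plus raw sample and period counts.
 */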
HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8)
HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8)
HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8)
HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8)
HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8)

HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12)
HPP_RAW_FNS(period, "Period", period, 12, 12)

static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

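/*
 * Designated-initializer bodies for the perf_hpp__format[] table below.
 * .cmp and .collapse always report equality, so these columns never
 * affect how hist entries are matched or collapsed; only .sort orders
 * the final output.
 */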
#define HPP__COLOR_PRINT_FNS(_name)			\
	{						\
		.header	= hpp__header_ ## _name,	\
		.width	= hpp__width_ ## _name,		\
		.color	= hpp__color_ ## _name,		\
		.entry	= hpp__entry_ ## _name,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _name,		\
	}

#define HPP__PRINT_FNS(_name)				\
	{						\
		.header	= hpp__header_ ## _name,	\
		.width	= hpp__width_ ## _name,		\
		.entry	= hpp__entry_ ## _name,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _name,		\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS(overhead),
	HPP__COLOR_PRINT_FNS(overhead_sys),
	HPP__COLOR_PRINT_FNS(overhead_us),
	HPP__COLOR_PRINT_FNS(overhead_guest_sys),
	HPP__COLOR_PRINT_FNS(overhead_guest_us),
	HPP__PRINT_FNS(samples),
	HPP__PRINT_FNS(period)
};

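/*
 * perf_hpp__list is the list of output fields (columns) in display
 * order; perf_hpp__sort_list is the list of sort keys.
 */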
LIST_HEAD(perf_hpp__list);
LIST_HEAD(perf_hpp__sort_list);

#undef HPP__COLOR_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN

void perf_hpp__init(void)
{
	struct list_head *list;
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		INIT_LIST_HEAD(&perf_hpp__format[i].list);
		INIT_LIST_HEAD(&perf_hpp__format[i].sort_list);
	}

	perf_hpp__column_enable(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS);
		perf_hpp__column_enable(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS);
			perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		perf_hpp__column_enable(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		perf_hpp__column_enable(PERF_HPP__PERIOD);

	/* prepend the overhead field for backward compatibility */
	list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list;
	if (list_empty(list))
		list_add(list, &perf_hpp__sort_list);

	perf_hpp__setup_output_field();
}

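/*
 * Registration helpers: a format can be added as an output column, as
 * a sort key, or both.  Callers typically enable a predefined column
 * by index, as perf_hpp__init() does above:
 *
 *	if (symbol_conf.show_nr_samples)
 *		perf_hpp__column_enable(PERF_HPP__SAMPLES);
 */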
void perf_hpp__column_register(struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &perf_hpp__list);
}

void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &perf_hpp__sort_list);
}

void perf_hpp__column_enable(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	perf_hpp__column_register(&perf_hpp__format[col]);
}

void perf_hpp__setup_output_field(void)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to the output field */
	perf_hpp__for_each_sort_list(fmt) {
		if (list_empty(&fmt->list))
			perf_hpp__column_register(fmt);
	}
}

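/*
 * Print the sort-key columns of @he into @s, separated by
 * symbol_conf.field_sep when set and by two spaces otherwise.
 * Elided columns are skipped.
 */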
int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
			      struct hists *hists)
{
	const char *sep = symbol_conf.field_sep;
	struct sort_entry *se;
	int ret = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += scnprintf(s + ret, size - ret, "%s", sep ?: "  ");
		ret += se->se_snprintf(he, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct sort_entry *se;
	int i = 0, ret = 0;
	struct perf_hpp dummy_hpp;

	perf_hpp__for_each_format(fmt) {
		if (i)
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
		i++;	/* count columns so each one after the first adds its separator */
	}

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(hists, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}