perf tools: Add front-end and back-end stalled cycles support
tools/perf/builtin-stat.c
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ~/hackbench 10
   Time: 0.104

    Performance counter stats for '/home/mingo/hackbench':

       1255.538611  task clock ticks     #      10.143 CPU utilization factor
             54011  context switches     #       0.043 M/sec
               385  CPU migrations       #       0.000 M/sec
             17755  pagefaults           #       0.014 M/sec
        3808323185  CPU cycles           #    3033.219 M/sec
        1575111190  instructions         #    1254.530 M/sec
          17367895  cache references     #      13.833 M/sec
           7674421  cache misses         #       6.112 M/sec

    Wall-clock time elapsed:   123.786620 msecs

 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
40 #include "perf.h"
41 #include "builtin.h"
42 #include "util/util.h"
43 #include "util/parse-options.h"
44 #include "util/parse-events.h"
45 #include "util/event.h"
46 #include "util/evlist.h"
47 #include "util/evsel.h"
48 #include "util/debug.h"
49 #include "util/color.h"
50 #include "util/header.h"
51 #include "util/cpumap.h"
52 #include "util/thread.h"
53 #include "util/thread_map.h"
55 #include <sys/prctl.h>
56 #include <math.h>
57 #include <locale.h>
59 #define DEFAULT_SEPARATOR " "
static struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS		},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES		},

};
/*
 * Detailed stats:
 */
static struct perf_event_attr detailed_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS		},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES		},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL			<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL			<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
};
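/*
 * A PERF_TYPE_HW_CACHE ->config packs three fields defined by the
 * perf_event ABI: the cache id in bits 0-7, the operation in bits 8-15
 * and the result in bits 16-23, i.e. config = id | (op << 8) |
 * (result << 16). For example, L1-dcache read misses:
 *
 *	.config = PERF_COUNT_HW_CACHE_L1D |
 *		  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 */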
struct perf_evlist	*evsel_list;

static bool		system_wide	= false;
static int		run_idx		= 0;

static int		run_count	= 1;
static bool		no_inherit	= false;
static bool		scale		= true;
static bool		no_aggr		= false;
static pid_t		target_pid	= -1;
static pid_t		target_tid	= -1;
static pid_t		child_pid	= -1;
static bool		null_run	= false;
static bool		detailed_run	= false;
static bool		sync_run	= false;
static bool		big_num		= true;
static int		big_num_opt	= -1;
static const char	*cpu_list;
static const char	*csv_sep	= NULL;
static bool		csv_output	= false;

static volatile int done = 0;

struct stats
{
	double n, mean, M2;
};

struct perf_stat {
	struct stats	res_stats[3];
};
static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat));
	return evsel->priv == NULL ? -ENOMEM : 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	free(evsel->priv);
	evsel->priv = NULL;
}
/*
 * Welford's online algorithm: fold one new sample into the running
 * count, mean and M2 (sum of squared deltas from the mean):
 */
static void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);
}

static double avg_stats(struct stats *stats)
{
	return stats->mean;
}
/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 */
static double stddev_stats(struct stats *stats)
{
	double variance = stats->M2 / (stats->n - 1);
	double variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
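/*
 * Worked example (illustrative values): after update_stats() has seen
 * the samples 1, 2 and 3, we have n = 3, mean = 2.0 and M2 = 2.0, so
 * stddev_stats() returns sqrt((2.0 / 2) / 3) ~= 0.577 - the standard
 * deviation of the mean across the three samples.
 */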
struct stats runtime_nsecs_stats[MAX_NR_CPUS];
struct stats runtime_cycles_stats[MAX_NR_CPUS];
struct stats runtime_stalled_cycles_stats[MAX_NR_CPUS];
struct stats runtime_branches_stats[MAX_NR_CPUS];
struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
struct stats walltime_nsecs_stats;
static int create_perf_stat_counter(struct perf_evsel *evsel)
{
	struct perf_event_attr *attr = &evsel->attr;

	if (scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	attr->inherit = !no_inherit;

	if (system_wide)
		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false);

	if (target_pid == -1 && target_tid == -1) {
		/*
		 * Open the counter disabled and let the exec() of the
		 * forked workload enable it, so that perf stat's own
		 * startup cost is not counted:
		 */
		attr->disabled = 1;
		attr->enable_on_exec = 1;
	}

	return perf_evsel__open_per_thread(evsel, evsel_list->threads, false);
}
/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(struct perf_evsel *evsel)
{
	if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	    perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		return 1;

	return 0;
}
/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
{
	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
		update_stats(&runtime_nsecs_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[0], count[0]);
}
/*
 * Read out the results of a single counter:
 * aggregate counts across CPUs in system-wide mode
 */
static int read_counter_aggr(struct perf_evsel *counter)
{
	struct perf_stat *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	int i;

	if (__perf_evsel__read(counter, evsel_list->cpus->nr,
			       evsel_list->threads->nr, scale) < 0)
		return -1;

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(stderr, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			event_name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	update_shadow_stats(counter, count);

	return 0;
}
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter(struct perf_evsel *counter)
{
	u64 *count;
	int cpu;

	for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
		if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
			return -1;

		count = counter->counts->cpu[cpu].values;

		update_shadow_stats(counter, count);
	}

	return 0;
}
static int run_perf_stat(int argc __used, const char **argv)
{
	unsigned long long t0, t1;
	struct perf_evsel *counter;
	int status = 0;
	int child_ready_pipe[2], go_pipe[2];
	const bool forks = (argc > 0);
	char buf;

	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
		perror("failed to create pipes");
		exit(1);
	}

	if (forks) {
		if ((child_pid = fork()) < 0)
			perror("failed to fork");

		if (!child_pid) {
			close(child_ready_pipe[0]);
			close(go_pipe[1]);
			fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

			/*
			 * Do a dummy execvp to get the PLT entry resolved,
			 * so we avoid the resolver overhead on the real
			 * execvp call.
			 */
			execvp("", (char **)argv);

			/*
			 * Tell the parent we're ready to go
			 */
			close(child_ready_pipe[1]);

			/*
			 * Wait until the parent tells us to go.
			 */
			if (read(go_pipe[0], &buf, 1) == -1)
				perror("unable to read pipe");

			execvp(argv[0], (char **)argv);

			perror(argv[0]);
			exit(-1);
		}

		if (target_tid == -1 && target_pid == -1 && !system_wide)
			evsel_list->threads->map[0] = child_pid;

		/*
		 * Wait for the child to be ready to exec.
		 */
		close(child_ready_pipe[1]);
		close(go_pipe[0]);
		if (read(child_ready_pipe[0], &buf, 1) == -1)
			perror("unable to read pipe");
		close(child_ready_pipe[0]);
	}

	list_for_each_entry(counter, &evsel_list->entries, node) {
		if (create_perf_stat_counter(counter) < 0) {
			if (errno == EINVAL || errno == ENOSYS)
				continue;

			if (errno == EPERM || errno == EACCES) {
				error("You may not have permission to collect %sstats.\n"
				      "\t Consider tweaking"
				      " /proc/sys/kernel/perf_event_paranoid or running as root.",
				      system_wide ? "system-wide " : "");
			} else if (errno == ENOENT) {
				error("%s event is not supported. ", event_name(counter));
			} else {
				error("open_counter returned with %d (%s). "
				      "/bin/dmesg may provide additional information.\n",
				      errno, strerror(errno));
			}
			if (child_pid != -1)
				kill(child_pid, SIGTERM);
			die("Not all events could be opened.\n");
			return -1;
		}
	}

	if (perf_evlist__set_filters(evsel_list)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		return -1;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();

	if (forks) {
		close(go_pipe[1]);
		wait(&status);
	} else {
		while (!done)
			sleep(1);
	}

	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	if (no_aggr) {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			read_counter(counter);
			perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1);
		}
	} else {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			read_counter_aggr(counter);
			perf_evsel__close_fd(counter, evsel_list->cpus->nr,
					     evsel_list->threads->nr);
		}
	}

	return WEXITSTATUS(status);
}
static void print_noise_pct(double total, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0*total/avg;

	fprintf(stderr, "  ( +-%6.2f%% )", pct);
}

static void print_noise(struct perf_evsel *evsel, double avg)
{
	struct perf_stat *ps;

	if (run_count == 1)
		return;

	ps = evsel->priv;
	print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
}
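/*
 * The "( +- x.xx% )" noise printed next to each averaged count is thus
 * the std dev of the mean across the -r runs, expressed as a
 * percentage of the average.
 */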
static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
{
	double msecs = avg / 1e6;
	char cpustr[16] = { '\0', };
	const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-24s";

	if (no_aggr)
		sprintf(cpustr, "CPU%*d%s",
			csv_output ? 0 : -4,
			evsel_list->cpus->map[cpu], csv_sep);

	fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(evsel));

	if (evsel->cgrp)
		fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name);

	if (csv_output)
		return;

	if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		fprintf(stderr, " # %8.3f CPUs utilized ",
			avg / avg_stats(&walltime_nsecs_stats));
}
static void print_stalled_cycles(int cpu, struct perf_evsel *evsel __used, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_cycles_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = PERF_COLOR_NORMAL;
	if (ratio > 75.0)
		color = PERF_COLOR_RED;
	else if (ratio > 50.0)
		color = PERF_COLOR_MAGENTA;
	else if (ratio > 25.0)
		color = PERF_COLOR_YELLOW;

	fprintf(stderr, " # ");
	color_fprintf(stderr, color, "%5.2f%%", ratio);
	fprintf(stderr, " of all cycles are idle ");
}
static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_branches_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = PERF_COLOR_NORMAL;
	if (ratio > 20.0)
		color = PERF_COLOR_RED;
	else if (ratio > 10.0)
		color = PERF_COLOR_MAGENTA;
	else if (ratio > 5.0)
		color = PERF_COLOR_YELLOW;

	fprintf(stderr, " # ");
	color_fprintf(stderr, color, "%5.2f%%", ratio);
	fprintf(stderr, " of all branches ");
}
static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_l1_dcache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = PERF_COLOR_NORMAL;
	if (ratio > 20.0)
		color = PERF_COLOR_RED;
	else if (ratio > 10.0)
		color = PERF_COLOR_MAGENTA;
	else if (ratio > 5.0)
		color = PERF_COLOR_YELLOW;

	fprintf(stderr, " # ");
	color_fprintf(stderr, color, "%5.2f%%", ratio);
	fprintf(stderr, " of all L1-dcache hits ");
}
static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
{
	double total, ratio = 0.0;
	char cpustr[16] = { '\0', };
	const char *fmt;

	if (csv_output)
		fmt = "%s%.0f%s%s";
	else if (big_num)
		fmt = "%s%'18.0f%s%-24s";
	else
		fmt = "%s%18.0f%s%-24s";

	if (no_aggr)
		sprintf(cpustr, "CPU%*d%s",
			csv_output ? 0 : -4,
			evsel_list->cpus->map[cpu], csv_sep);
	else
		cpu = 0;

	fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(evsel));

	if (evsel->cgrp)
		fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name);

	if (csv_output)
		return;

	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = avg_stats(&runtime_cycles_stats[cpu]);

		if (total)
			ratio = avg / total;

		fprintf(stderr, " # %4.2f insns per cycle ", ratio);

		total = avg_stats(&runtime_stalled_cycles_stats[cpu]);

		if (total && avg) {
			ratio = total / avg;
			fprintf(stderr, "\n # %4.2f stalled cycles per insn", ratio);
		}

	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
			runtime_branches_stats[cpu].n != 0) {
		print_branch_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_l1_dcache_stats[cpu].n != 0) {
		print_l1_dcache_misses(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
			runtime_cacherefs_stats[cpu].n != 0) {
		total = avg_stats(&runtime_cacherefs_stats[cpu]);

		if (total)
			ratio = avg * 100 / total;

		fprintf(stderr, " # %8.3f %% of all cache refs ", ratio);

	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1.0 * avg / total;

		fprintf(stderr, " # %8.3f GHz ", ratio);
	} else if (runtime_nsecs_stats[cpu].n != 0) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1000.0 * avg / total;

		fprintf(stderr, " # %8.3f M/sec ", ratio);
	} else {
		fprintf(stderr, " ");
	}
}
/*
 * Print out the results of a single counter:
 * aggregated counts in system-wide mode
 */
static void print_counter_aggr(struct perf_evsel *counter)
{
	struct perf_stat *ps = counter->priv;
	double avg = avg_stats(&ps->res_stats[0]);
	int scaled = counter->counts->scaled;

	if (scaled == -1) {
		fprintf(stderr, "%*s%s%*s",
			csv_output ? 0 : 18,
			"<not counted>",
			csv_sep,
			csv_output ? 0 : -24,
			event_name(counter));

		if (counter->cgrp)
			fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name);

		fputc('\n', stderr);
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(-1, counter, avg);
	else
		abs_printout(-1, counter, avg);

	if (csv_output) {
		fputc('\n', stderr);
		return;
	}

	print_noise(counter, avg);

	if (scaled) {
		double avg_enabled, avg_running;

		avg_enabled = avg_stats(&ps->res_stats[1]);
		avg_running = avg_stats(&ps->res_stats[2]);

		fprintf(stderr, " (%.2f%%)", 100 * avg_running / avg_enabled);
	}
	fprintf(stderr, "\n");
}
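/*
 * Note on the trailing "(xx.xx%)" printed above: it is the fraction of
 * time the counter was actually running vs enabled. Below 100% the
 * kernel had to time-multiplex the hardware counters, and the printed
 * counts are scaled estimates.
 */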
/*
 * Print out the results of a single counter:
 * does not use aggregated count in system-wide
 */
static void print_counter(struct perf_evsel *counter)
{
	u64 ena, run, val;
	int cpu;

	for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
		val = counter->counts->cpu[cpu].val;
		ena = counter->counts->cpu[cpu].ena;
		run = counter->counts->cpu[cpu].run;
		if (run == 0 || ena == 0) {
			fprintf(stderr, "CPU%*d%s%*s%s%*s",
				csv_output ? 0 : -4,
				evsel_list->cpus->map[cpu], csv_sep,
				csv_output ? 0 : 18,
				"<not counted>", csv_sep,
				csv_output ? 0 : -24,
				event_name(counter));

			if (counter->cgrp)
				fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name);

			fputc('\n', stderr);
			continue;
		}

		if (nsec_counter(counter))
			nsec_printout(cpu, counter, val);
		else
			abs_printout(cpu, counter, val);

		if (!csv_output) {
			print_noise(counter, 1.0);

			if (run != ena)
				fprintf(stderr, " (%.2f%%)", 100.0 * run / ena);
		}
		fputc('\n', stderr);
	}
}
static void print_stat(int argc, const char **argv)
{
	struct perf_evsel *counter;
	int i;

	fflush(stdout);

	if (!csv_output) {
		fprintf(stderr, "\n");
		fprintf(stderr, " Performance counter stats for ");
		if (target_pid == -1 && target_tid == -1) {
			fprintf(stderr, "\'%s", argv[0]);
			for (i = 1; i < argc; i++)
				fprintf(stderr, " %s", argv[i]);
		} else if (target_pid != -1)
			fprintf(stderr, "process id \'%d", target_pid);
		else
			fprintf(stderr, "thread id \'%d", target_tid);

		fprintf(stderr, "\'");
		if (run_count > 1)
			fprintf(stderr, " (%d runs)", run_count);
		fprintf(stderr, ":\n\n");
	}

	if (no_aggr) {
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter(counter);
	} else {
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter_aggr(counter);
	}

	if (!csv_output) {
		fprintf(stderr, "\n");
		fprintf(stderr, " %18.9f seconds time elapsed",
				avg_stats(&walltime_nsecs_stats)/1e9);
		if (run_count > 1) {
			print_noise_pct(stddev_stats(&walltime_nsecs_stats),
					avg_stats(&walltime_nsecs_stats));
		}
		fprintf(stderr, "\n\n");
	}
}
static volatile int signr = -1;

static void skip_signal(int signo)
{
	if (child_pid == -1)
		done = 1;

	signr = signo;
}

static void sig_atexit(void)
{
	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
static const char * const stat_usage[] = {
	"perf stat [<options>] [<command>]",
	NULL
};

static int stat__set_big_num(const struct option *opt __used,
			     const char *s __used, int unset)
{
	big_num_opt = unset ? 0 : 1;
	return 0;
}
static const struct option options[] = {
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "stat events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('c', "scale", &scale,
		    "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - don't start any counters"),
	OPT_BOOLEAN('d', "detailed", &detailed_run,
		    "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands\' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_BOOLEAN('A', "no-aggr", &no_aggr,
		    "disable CPU count aggregation"),
	OPT_STRING('x', "field-separator", &csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_END()
};
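/*
 * Example invocations (illustrative):
 *
 *	perf stat ./hackbench 10	# default event set
 *	perf stat -d ./hackbench 10	# detailed run, adds cache events
 *	perf stat -r 5 -S ./hackbench 10	# 5 runs, sync() before each,
 *						# prints avg +- stddev noise
 */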
int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
	struct perf_evsel *pos;
	int status = -ENOMEM;

	setlocale(LC_ALL, "");

	evsel_list = perf_evlist__new(NULL, NULL);
	if (evsel_list == NULL)
		return -ENOMEM;

	argc = parse_options(argc, argv, options, stat_usage,
		PARSE_OPT_STOP_AT_NON_OPTION);

	if (csv_sep)
		csv_output = true;
	else
		csv_sep = DEFAULT_SEPARATOR;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			usage_with_options(stat_usage, options);
		} else /* Nope, so disable big number formatting */
			big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		big_num = false;

	if (!argc && target_pid == -1 && target_tid == -1)
		usage_with_options(stat_usage, options);
	if (run_count <= 0)
		usage_with_options(stat_usage, options);

	/* no_aggr, cgroup are for system-wide only */
	if ((no_aggr || nr_cgroups) && !system_wide) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		usage_with_options(stat_usage, options);
	}

	/* Add the detailed event set if a detailed run was requested: */
	if (detailed_run) {
		size_t c;

		for (c = 0; c < ARRAY_SIZE(detailed_attrs); ++c) {
			pos = perf_evsel__new(&detailed_attrs[c], c);
			if (pos == NULL)
				goto out;
			perf_evlist__add(evsel_list, pos);
		}
	}
	/* Fall back to the default event set if no event is selected and !null_run: */
	if (!detailed_run && !null_run && !evsel_list->nr_entries) {
		size_t c;

		for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
			pos = perf_evsel__new(&default_attrs[c], c);
			if (pos == NULL)
				goto out;
			perf_evlist__add(evsel_list, pos);
		}
	}

	if (target_pid != -1)
		target_tid = target_pid;

	evsel_list->threads = thread_map__new(target_pid, target_tid);
	if (evsel_list->threads == NULL) {
		pr_err("Problems finding threads of monitor\n");
		usage_with_options(stat_usage, options);
	}

	if (system_wide)
		evsel_list->cpus = cpu_map__new(cpu_list);
	else
		evsel_list->cpus = cpu_map__dummy_new();

	if (evsel_list->cpus == NULL) {
		perror("failed to parse CPUs map");
		usage_with_options(stat_usage, options);
		return -1;
	}

	list_for_each_entry(pos, &evsel_list->entries, node) {
		if (perf_evsel__alloc_stat_priv(pos) < 0 ||
		    perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 ||
		    perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0)
			goto out_free_fd;
	}

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	signal(SIGINT,  skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);

		if (sync_run)
			sync();

		status = run_perf_stat(argc, argv);
	}

	if (status != -1)
		print_stat(argc, argv);
out_free_fd:
	list_for_each_entry(pos, &evsel_list->entries, node)
		perf_evsel__free_stat_priv(pos);
	perf_evlist__delete_maps(evsel_list);
out:
	perf_evlist__delete(evsel_list);
	return status;
}