/*
 * Builtin stat command: Give a precise performance counter summary
 * overview of any workload, CPU or specific PID.
 *
 * Sample output:
 *
 *   $ perf stat ~/hackbench 10
 *
 *   Performance counter stats for '/home/mingo/hackbench':
 *
 *       1255.538611  task clock ticks     #      10.143 CPU utilization factor
 *             54011  context switches     #       0.043 M/sec
 *               385  CPU migrations       #       0.000 M/sec
 *             17755  pagefaults           #       0.014 M/sec
 *        3808323185  CPU cycles           #    3033.219 M/sec
 *        1575111190  instructions         #    1254.530 M/sec
 *          17367895  cache references     #      13.833 M/sec
 *           7674421  cache misses         #       6.112 M/sec
 *
 *   Wall-clock time elapsed:   123.786620 msecs
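 *
 * (Derived columns above, worked out here for illustration only: 54011
 *  context switches / 1255.538611 msecs of task clock ~= 0.043 M/sec,
 *  and 1255.538611 msecs task clock / 123.786620 msecs wall-clock time
 *  ~= 10.143 CPUs utilized.)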
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "perf.h"
#include "builtin.h"

#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/debug.h"

#include <math.h>
#include <sys/prctl.h>
static struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS	},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES	},

};
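/*
 * Note added for clarity: this is only the fallback event set. cmd_stat()
 * copies it into attrs[] when no -e/--event selector was given and --null
 * was not passed (see the memcpy near the bottom of this file).
 */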
static int			system_wide			=  0;
static unsigned int		nr_cpus				=  0;
static int			run_idx				=  0;

static int			run_count			=  1;
static int			inherit				=  1;
static int			scale				=  1;
static int			target_pid			= -1;
static int			null_run			=  0;

static int			fd[MAX_NR_CPUS][MAX_COUNTERS];

static int			event_scaled[MAX_COUNTERS];
struct stats
{
	double n, mean, M2;
};

static void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);
}
static double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
static double stddev_stats(struct stats *stats)
{
	double variance = stats->M2 / (stats->n - 1);
	double variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
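/*
 * Illustration (not part of the original flow): update_stats() is an
 * online, Welford-style running-moment update, so for the hypothetical
 * samples 10, 12 and 14 it ends up with n = 3, mean = 12 and M2 = 8,
 * giving avg_stats() == 12 and stddev_stats() == sqrt((8 / 2) / 3)
 * ~= 1.155, i.e. the std dev of the mean as described above.
 */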
struct stats			event_res_stats[MAX_COUNTERS][3];
struct stats			runtime_nsecs_stats;
struct stats			walltime_nsecs_stats;
struct stats			runtime_cycles_stats;
#define MATCH_EVENT(t, c, counter)			\
	(attrs[counter].type == PERF_TYPE_##t &&	\
	 attrs[counter].config == PERF_COUNT_##c)

#define ERR_PERF_OPEN \
"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n"
static void create_perf_stat_counter(int counter, int pid)
{
	struct perf_event_attr *attr = attrs + counter;

	if (scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	if (system_wide) {
		unsigned int cpu;

		for (cpu = 0; cpu < nr_cpus; cpu++) {
			fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0);
			if (fd[cpu][counter] < 0 && verbose)
				fprintf(stderr, ERR_PERF_OPEN, counter,
					fd[cpu][counter], strerror(errno));
		}
	} else {
		attr->inherit	     = inherit;
		attr->disabled	     = 1;
		attr->enable_on_exec = 1;

		fd[0][counter] = sys_perf_event_open(attr, pid, -1, -1, 0);
		if (fd[0][counter] < 0 && verbose)
			fprintf(stderr, ERR_PERF_OPEN, counter,
				fd[0][counter], strerror(errno));
	}
}
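/*
 * Note on the two open paths above: in system-wide mode one fd is opened
 * per online CPU with pid == -1 (count every task on that CPU), while in
 * per-task mode a single fd is opened with cpu == -1 and the target pid,
 * relying on enable_on_exec so counting only starts once the child execs.
 */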
/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(int counter)
{
	if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
	    MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
		return 1;

	return 0;
}
/*
 * Read out the results of a single counter:
 */
static void read_counter(int counter)
{
	u64 count[3], single_count[3];
	unsigned int cpu;
	size_t res, nv;
	int i;

	count[0] = count[1] = count[2] = 0;

	nv = scale ? 3 : 1;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (fd[cpu][counter] < 0)
			continue;

		res = read(fd[cpu][counter], single_count, nv * sizeof(u64));
		assert(res == nv * sizeof(u64));

		close(fd[cpu][counter]);
		fd[cpu][counter] = -1;

		count[0] += single_count[0];
		if (scale) {
			count[1] += single_count[1];
			count[2] += single_count[2];
		}
	}

	if (scale) {
		if (count[2] == 0) {
			event_scaled[counter] = -1;
			count[0] = 0;
			return;
		}

		if (count[2] < count[1]) {
			event_scaled[counter] = 1;
			count[0] = (unsigned long long)
				((double)count[0] * count[1] / count[2] + 0.5);
		}
	}

	for (i = 0; i < 3; i++)
		update_stats(&event_res_stats[counter][i], count[i]);

	if (verbose) {
		fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
				count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
		update_stats(&runtime_nsecs_stats, count[0]);
	if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
		update_stats(&runtime_cycles_stats, count[0]);
}
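/*
 * Note on the scaling in read_counter() above: count[1] is the time the
 * event was enabled and count[2] the time it was actually on the PMU.
 * With multiplexing, e.g. a raw count of 1000 collected while running
 * for 5 of 10 enabled msecs, the estimate becomes 1000 * 10 / 5 = 2000
 * (+ 0.5 for rounding). Hypothetical numbers, for illustration only.
 */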
static int run_perf_stat(int argc __used, const char **argv)
{
	unsigned long long t0, t1;
	int status = 0;
	int counter;
	int pid;
	int child_ready_pipe[2], go_pipe[2];
	char buf;

	if (!system_wide)
		nr_cpus = 1;

	if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
		perror("failed to create pipes");
		exit(1);
	}

	if ((pid = fork()) < 0)
		perror("failed to fork");

	if (!pid) {
		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go:
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &buf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		exit(-1);
	}

	/*
	 * Wait for the child to be ready to exec.
	 */
	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	if (read(child_ready_pipe[0], &buf, 1) == -1)
		perror("unable to read pipe");
	close(child_ready_pipe[0]);

	for (counter = 0; counter < nr_counters; counter++)
		create_perf_stat_counter(counter, pid);

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();

	close(go_pipe[1]);
	wait(&status);

	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	for (counter = 0; counter < nr_counters; counter++)
		read_counter(counter);

	return WEXITSTATUS(status);
}
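/*
 * The pipe handshake above, summarized for clarity: the child resolves
 * execvp's PLT entry with a dummy call, then closes child_ready_pipe[1];
 * the parent's read on child_ready_pipe[0] returns, it opens the counters
 * against the child's pid and closes go_pipe[1], which unblocks the
 * child's read on go_pipe[0] so it can exec the real workload with the
 * counters armed via enable_on_exec.
 */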
static void print_noise(int counter, double avg)
{
	if (run_count == 1)
		return;

	fprintf(stderr, "   ( +- %7.3f%% )",
			100 * stddev_stats(&event_res_stats[counter][0]) / avg);
}
static void nsec_printout(int counter, double avg)
{
	double msecs = avg / 1e6;

	fprintf(stderr, " %14.6f  %-24s", msecs, event_name(counter));

	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
		fprintf(stderr, " # %10.3f CPUs ",
				avg / avg_stats(&walltime_nsecs_stats));
	}
}
static void abs_printout(int counter, double avg)
{
	fprintf(stderr, " %14.0f  %-24s", avg, event_name(counter));

	if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
		fprintf(stderr, " # %10.3f IPC  ",
				avg / avg_stats(&runtime_cycles_stats));
	} else {
		fprintf(stderr, " # %10.3f M/sec",
				1000.0 * avg / avg_stats(&runtime_nsecs_stats));
	}
}
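/*
 * For illustration, using the sample numbers from the header comment:
 * 1575111190 instructions / 3808323185 cycles ~= 0.414 IPC, while the
 * M/sec column divides by the task-clock runtime in nsecs, hence the
 * 1000.0 factor to convert counts-per-nsec into millions per second.
 */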
/*
 * Print out the results of a single counter:
 */
static void print_counter(int counter)
{
	double avg = avg_stats(&event_res_stats[counter][0]);
	int scaled = event_scaled[counter];

	if (scaled == -1) {
		fprintf(stderr, " %14s  %-24s\n",
			"<not counted>", event_name(counter));
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(counter, avg);
	else
		abs_printout(counter, avg);

	print_noise(counter, avg);

	if (scaled) {
		double avg_enabled, avg_running;

		avg_enabled = avg_stats(&event_res_stats[counter][1]);
		avg_running = avg_stats(&event_res_stats[counter][2]);

		fprintf(stderr, "  (scaled from %.2f%%)",
				100 * avg_running / avg_enabled);
	}

	fprintf(stderr, "\n");
}
static void print_stat(int argc, const char **argv)
{
	int i, counter;

	fflush(stdout);

	fprintf(stderr, "\n");
	fprintf(stderr, " Performance counter stats for \'%s", argv[0]);

	for (i = 1; i < argc; i++)
		fprintf(stderr, " %s", argv[i]);

	fprintf(stderr, "\'");
	if (run_count > 1)
		fprintf(stderr, " (%d runs)", run_count);
	fprintf(stderr, ":\n\n");

	for (counter = 0; counter < nr_counters; counter++)
		print_counter(counter);

	fprintf(stderr, "\n");
	fprintf(stderr, " %14.9f  seconds time elapsed",
			avg_stats(&walltime_nsecs_stats)/1e9);
	if (run_count > 1) {
		fprintf(stderr, "   ( +- %7.3f%% )",
				100*stddev_stats(&walltime_nsecs_stats) /
				avg_stats(&walltime_nsecs_stats));
	}
	fprintf(stderr, "\n\n");
}
static volatile int signr = -1;

static void skip_signal(int signo)
{
	signr = signo;
}

static void sig_atexit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
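/*
 * Added note: skip_signal() only records the signal number; sig_atexit(),
 * registered from cmd_stat() below, restores the default handler and
 * re-raises the signal on exit, so perf stat terminates with the expected
 * signal status after printing, while Ctrl-C still reaches the exec'ed
 * workload normally.
 */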
static const char * const stat_usage[] = {
	"perf stat [<options>] <command>",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "stat events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('c', "scale", &scale,
		    "scale/normalize counters"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - don't start any counters"),
	OPT_END()
};
int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
	int status;

	argc = parse_options(argc, argv, options, stat_usage,
		PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(stat_usage, options);
	if (run_count <= 0)
		usage_with_options(stat_usage, options);

	/* Set attrs and nr_counters if no event is selected and !null_run */
	if (!null_run && !nr_counters) {
		memcpy(attrs, default_attrs, sizeof(default_attrs));
		nr_counters = ARRAY_SIZE(default_attrs);
	}

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert((int)nr_cpus >= 0);

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, while being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	signal(SIGINT,  skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
		status = run_perf_stat(argc, argv);
	}

	print_stat(argc, argv);

	return status;
}