/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:
 *
 *   $ perf stat ~/hackbench 10
 *
 *   Performance counter stats for '/home/mingo/hackbench':
 *
 *       1255.538611  task clock ticks     #      10.143 CPU utilization factor
 *             54011  context switches     #       0.043 M/sec
 *               385  CPU migrations       #       0.000 M/sec
 *             17755  pagefaults           #       0.014 M/sec
 *        3808323185  CPU cycles           #    3033.219 M/sec
 *        1575111190  instructions         #    1254.530 M/sec
 *          17367895  cache references     #      13.833 M/sec
 *           7674421  cache misses         #       6.112 M/sec
 *
 *   Wall-clock time elapsed:   123.786620 msecs
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "perf.h"
#include "builtin.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include <sys/prctl.h>
#include <math.h>
static struct perf_counter_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK       },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS   },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS      },

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES       },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS     },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES     },

};
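/*
 * Note: this default set is exactly what the sample output at the top of
 * this file shows; it is only used when no -e event is selected on the
 * command line (see cmd_stat() below).
 */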
#define MAX_RUN			100

static int		system_wide		=  0;
static int		verbose			=  0;
static unsigned int	nr_cpus			=  0;
static int		run_idx			=  0;

static int		run_count		=  1;
static int		inherit			=  1;
static int		scale			=  1;
static int		target_pid		= -1;
static int		null_run		=  0;

static int		fd[MAX_NR_CPUS][MAX_COUNTERS];

static u64		runtime_nsecs[MAX_RUN];
static u64		walltime_nsecs[MAX_RUN];
static u64		runtime_cycles[MAX_RUN];

static u64		event_res[MAX_RUN][MAX_COUNTERS][3];
static u64		event_scaled[MAX_RUN][MAX_COUNTERS];

static u64		event_res_avg[MAX_COUNTERS][3];
static u64		event_res_noise[MAX_COUNTERS][3];

static u64		event_scaled_avg[MAX_COUNTERS];

static u64		runtime_nsecs_avg;
static u64		runtime_nsecs_noise;

static u64		walltime_nsecs_avg;
static u64		walltime_nsecs_noise;

static u64		runtime_cycles_avg;
static u64		runtime_cycles_noise;
#define MATCH_EVENT(t, c, counter)			\
	(attrs[counter].type == PERF_TYPE_##t &&	\
	 attrs[counter].config == PERF_COUNT_##c)

#define ERR_PERF_OPEN \
"Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n"
static void create_perf_stat_counter(int counter, int pid)
{
	struct perf_counter_attr *attr = attrs + counter;

	if (scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	if (system_wide) {
		unsigned int cpu;

		for (cpu = 0; cpu < nr_cpus; cpu++) {
			fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0);
			if (fd[cpu][counter] < 0 && verbose)
				fprintf(stderr, ERR_PERF_OPEN, counter,
					fd[cpu][counter], strerror(errno));
		}
	} else {
		attr->inherit		= inherit;
		attr->disabled		= 1;
		attr->enable_on_exec	= 1;

		fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0);
		if (fd[0][counter] < 0 && verbose)
			fprintf(stderr, ERR_PERF_OPEN, counter,
				fd[0][counter], strerror(errno));
	}
}
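/*
 * Note: sys_perf_counter_open() takes (attr, pid, cpu, group_fd, flags).
 * Passing pid == -1 with a real cpu binds the counter to that CPU for
 * all tasks (system-wide mode); passing a real pid with cpu == -1
 * follows one task across CPUs. enable_on_exec leaves the counter
 * disabled until the child calls exec(), so perf stat's own fork/exec
 * setup is not counted.
 */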
/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(int counter)
{
	if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
	    MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
		return 1;

	return 0;
}
/*
 * Read out the results of a single counter:
 */
static void read_counter(int counter)
{
	u64 *count, single_count[3];
	unsigned int cpu;
	size_t res, nv;

	count = event_res[run_idx][counter];

	count[0] = count[1] = count[2] = 0;

	nv = scale ? 3 : 1;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (fd[cpu][counter] < 0)
			continue;

		res = read(fd[cpu][counter], single_count, nv * sizeof(u64));
		assert(res == nv * sizeof(u64));

		close(fd[cpu][counter]);
		fd[cpu][counter] = -1;

		count[0] += single_count[0];
		if (scale) {
			count[1] += single_count[1];
			count[2] += single_count[2];
		}
	}

	if (scale) {
		if (count[2] == 0) {
			/* The counter never actually ran: */
			event_scaled[run_idx][counter] = -1;
			count[0] = 0;
			return;
		}

		if (count[2] < count[1]) {
			event_scaled[run_idx][counter] = 1;
			count[0] = (unsigned long long)
				((double)count[0] * count[1] / count[2] + 0.5);
		}
	}
	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
		runtime_nsecs[run_idx] = count[0];
	if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
		runtime_cycles[run_idx] = count[0];
}
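/*
 * A note on the scaling above: with PERF_FORMAT_TOTAL_TIME_ENABLED and
 * PERF_FORMAT_TOTAL_TIME_RUNNING, each read() returns three u64s: the
 * raw count, the time the counter was enabled (count[1]) and the time it
 * was actually on the PMU (count[2]). If counters were multiplexed
 * (count[2] < count[1]), the raw value is extrapolated as:
 *
 *	final = raw * time_enabled / time_running
 *
 * e.g. a raw count of 1000 that ran for only half of the enabled time is
 * reported as 2000; the +0.5 rounds to nearest.
 */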
static int run_perf_stat(int argc __used, const char **argv)
{
	unsigned long long t0, t1;
	int status = 0;
	int counter;
	int pid;
	int child_ready_pipe[2], go_pipe[2];
	char buf;

	if (!system_wide)
		nr_cpus = 1;

	if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
		perror("failed to create pipes");
		exit(1);
	}

	if ((pid = fork()) < 0)
		perror("failed to fork");

	if (!pid) {
		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &buf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		exit(-1);
	}

	/*
	 * Wait for the child to be ready to exec.
	 */
	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	if (read(child_ready_pipe[0], &buf, 1) == -1)
		perror("unable to read pipe");
	close(child_ready_pipe[0]);

	for (counter = 0; counter < nr_counters; counter++)
		create_perf_stat_counter(counter, pid);

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	close(go_pipe[1]);

	wait(&status);
	t1 = rdclock();

	walltime_nsecs[run_idx] = t1 - t0;

	for (counter = 0; counter < nr_counters; counter++)
		read_counter(counter);

	return WEXITSTATUS(status);
}
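/*
 * How the handshake above works: the child's go_pipe read end is marked
 * FD_CLOEXEC, and the parent learns that the child is ready when the
 * child's close(child_ready_pipe[1]) delivers EOF to the parent's
 * read(). The parent then opens the counters against the child's pid,
 * and its close(go_pipe[1]) delivers EOF to the child's blocking read(),
 * releasing it into the real execvp() - at which point enable_on_exec
 * turns the counters on.
 */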
static void print_noise(u64 *count, u64 *noise)
{
	if (run_count > 1)
		fprintf(stderr, "   ( +- %7.3f%% )",
			(double)noise[0]/(count[0]+1)*100.0);
}
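/*
 * Note: the +1 in the denominator only guards against division by zero
 * when the count is 0; the printed percentage is the normalized noise
 * relative to the averaged count.
 */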
static void nsec_printout(int counter, u64 *count, u64 *noise)
{
	double msecs = (double)count[0] / 1000000;

	fprintf(stderr, " %14.6f  %-24s", msecs, event_name(counter));

	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
		if (walltime_nsecs_avg)
			fprintf(stderr, " # %10.3f CPUs ",
				(double)count[0] / (double)walltime_nsecs_avg);
	}
	print_noise(count, noise);
}
static void abs_printout(int counter, u64 *count, u64 *noise)
{
	fprintf(stderr, " %14Ld  %-24s", count[0], event_name(counter));

	if (runtime_cycles_avg &&
	    MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
		fprintf(stderr, " # %10.3f IPC  ",
			(double)count[0] / (double)runtime_cycles_avg);
	} else {
		if (runtime_nsecs_avg) {
			fprintf(stderr, " # %10.3f M/sec",
				(double)count[0]/runtime_nsecs_avg*1000.0);
		}
	}
	print_noise(count, noise);
}
/*
 * Print out the results of a single counter:
 */
static void print_counter(int counter)
{
	u64 *count, *noise;
	int scaled;

	count  = event_res_avg[counter];
	noise  = event_res_noise[counter];
	scaled = event_scaled_avg[counter];

	if (scaled == -1) {
		fprintf(stderr, " %14s  %-24s\n",
			"<not counted>", event_name(counter));
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(counter, count, noise);
	else
		abs_printout(counter, count, noise);

	if (scaled)
		fprintf(stderr, "  (scaled from %.2f%%)",
			(double) count[2] / count[1] * 100);

	fprintf(stderr, "\n");
}
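/*
 * The "(scaled from X%)" annotation is count[2]/count[1], i.e. the
 * fraction of enabled time the counter actually spent on the PMU, so
 * readers can judge how much of the printed value is extrapolation.
 */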
/*
 * normalize_noise noise values down to stddev:
 */
static void normalize_noise(u64 *val)
{
	double res;

	res = (double)*val / (run_count * sqrt((double)run_count));

	*val = (u64)res;
}
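/*
 * In other words: calc_avg() below accumulates sums of absolute
 * deviations from the mean, and dividing that sum by
 * run_count * sqrt(run_count) yields (mean absolute deviation)/sqrt(n) -
 * a cheap approximation of the standard deviation of the mean, not an
 * exact stddev.
 */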
static void update_avg(const char *name, int idx, u64 *avg, u64 *val)
{
	*avg += *val;

	if (verbose > 1)
		fprintf(stderr, "debug: %20s[%d]: %Ld\n", name, idx, *val);
}
/*
 * Calculate the averages and noises:
 */
static void calc_avg(void)
{
	int i, j;

	if (verbose > 1)
		fprintf(stderr, "\n");

	for (i = 0; i < run_count; i++) {
		update_avg("runtime", 0, &runtime_nsecs_avg, runtime_nsecs + i);
		update_avg("walltime", 0, &walltime_nsecs_avg, walltime_nsecs + i);
		update_avg("runtime_cycles", 0, &runtime_cycles_avg, runtime_cycles + i);

		for (j = 0; j < nr_counters; j++) {
			update_avg("counter/0", j,
				event_res_avg[j]+0, event_res[i][j]+0);
			update_avg("counter/1", j,
				event_res_avg[j]+1, event_res[i][j]+1);
			update_avg("counter/2", j,
				event_res_avg[j]+2, event_res[i][j]+2);
			if (event_scaled[i][j] != (u64)-1)
				update_avg("scaled", j,
					event_scaled_avg + j, event_scaled[i]+j);
			else
				event_scaled_avg[j] = -1;
		}
	}
	runtime_nsecs_avg	/= run_count;
	walltime_nsecs_avg	/= run_count;
	runtime_cycles_avg	/= run_count;

	for (j = 0; j < nr_counters; j++) {
		event_res_avg[j][0] /= run_count;
		event_res_avg[j][1] /= run_count;
		event_res_avg[j][2] /= run_count;
	}

	for (i = 0; i < run_count; i++) {
		runtime_nsecs_noise +=
			abs((s64)(runtime_nsecs[i] - runtime_nsecs_avg));
		walltime_nsecs_noise +=
			abs((s64)(walltime_nsecs[i] - walltime_nsecs_avg));
		runtime_cycles_noise +=
			abs((s64)(runtime_cycles[i] - runtime_cycles_avg));

		for (j = 0; j < nr_counters; j++) {
			event_res_noise[j][0] +=
				abs((s64)(event_res[i][j][0] - event_res_avg[j][0]));
			event_res_noise[j][1] +=
				abs((s64)(event_res[i][j][1] - event_res_avg[j][1]));
			event_res_noise[j][2] +=
				abs((s64)(event_res[i][j][2] - event_res_avg[j][2]));
		}
	}

	normalize_noise(&runtime_nsecs_noise);
	normalize_noise(&walltime_nsecs_noise);
	normalize_noise(&runtime_cycles_noise);

	for (j = 0; j < nr_counters; j++) {
		normalize_noise(&event_res_noise[j][0]);
		normalize_noise(&event_res_noise[j][1]);
		normalize_noise(&event_res_noise[j][2]);
	}
}
static void print_stat(int argc, const char **argv)
{
	int i, counter;

	calc_avg();

	fflush(stdout);

	fprintf(stderr, "\n");
	fprintf(stderr, " Performance counter stats for \'%s", argv[0]);

	for (i = 1; i < argc; i++)
		fprintf(stderr, " %s", argv[i]);

	fprintf(stderr, "\'");
	if (run_count > 1)
		fprintf(stderr, " (%d runs)", run_count);
	fprintf(stderr, ":\n\n");

	for (counter = 0; counter < nr_counters; counter++)
		print_counter(counter);

	fprintf(stderr, "\n");
	fprintf(stderr, " %14.9f  seconds time elapsed",
			(double)walltime_nsecs_avg/1e9);
	if (run_count > 1) {
		fprintf(stderr, "   ( +- %7.3f%% )",
			100.0*(double)walltime_nsecs_noise/(double)walltime_nsecs_avg);
	}
	fprintf(stderr, "\n\n");
}
static volatile int signr = -1;

static void skip_signal(int signo)
{
	signr = signo;
}
static void sig_atexit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
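/*
 * Restoring SIG_DFL and re-raising the recorded signal at exit is the
 * standard idiom for dying "honestly": the parent shell then sees perf
 * stat terminated by the same signal that interrupted the workload,
 * rather than a plain exit status.
 */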
static const char * const stat_usage[] = {
	"perf stat [<options>] <command>",
	NULL
};
static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "stat events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('c', "scale", &scale,
		    "scale/normalize counters"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - don't start any counters"),
	OPT_END()
};
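/*
 * Illustrative invocations (the workload names are placeholders):
 *
 *	perf stat ./hackbench 10		# default events, one run
 *	perf stat -r 5 ./hackbench 10		# 5 runs, average +- noise
 *	perf stat -e cycles -e instructions ./hackbench 10
 *	perf stat -a sleep 10			# all CPUs for 10 seconds
 */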
int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
	int status;

	argc = parse_options(argc, argv, options, stat_usage,
		PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(stat_usage, options);
	if (run_count <= 0 || run_count > MAX_RUN)
		usage_with_options(stat_usage, options);

	/* Set attrs and nr_counters if no event is selected and !null_run */
	if (!null_run && !nr_counters) {
		memcpy(attrs, default_attrs, sizeof(default_attrs));
		nr_counters = ARRAY_SIZE(default_attrs);
	}

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert((int)nr_cpus >= 0);

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	signal(SIGINT,  skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(stderr, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);
		status = run_perf_stat(argc, argv);
	}

	print_stat(argc, argv);

	return status;
}