/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:
 *
 *   $ perf stat ~/hackbench 10
 *
 *    Performance counter stats for '/home/mingo/hackbench':
 *
 *       1255.538611  task clock ticks     #      10.143 CPU utilization factor
 *             54011  context switches     #       0.043 M/sec
 *               385  CPU  migrations      #       0.000 M/sec
 *             17755  pagefaults           #       0.014 M/sec
 *        3808323185  CPU cycles           #    3033.219 M/sec
 *        1575111190  instructions         #    1254.530 M/sec
 *          17367895  cache references     #      13.833 M/sec
 *           7674421  cache misses         #       6.112 M/sec
 *
 *    Wall-clock time elapsed:   123.786620 msecs
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/debug.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"

#include <sys/prctl.h>
#include <math.h>
#include <locale.h>
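
/*
 * Default event set, used when no -e/--event selector is given on the
 * command line: cmd_stat() below copies this table into attrs[] and
 * sets nr_counters from it.
 */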
static struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS		},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES		},

};
static bool		system_wide	= false;
static int		nr_cpus		= 0;
static int		run_idx		= 0;

static int		run_count	= 1;
static bool		no_inherit	= false;
static bool		scale		= true;
static pid_t		target_pid	= -1;
static pid_t		target_tid	= -1;
static pid_t		*all_tids	= NULL;
static int		thread_num	= 0;
static pid_t		child_pid	= -1;
static bool		null_run	= false;
static bool		big_num		= false;
static const char	*cpu_list;

static int		*fd[MAX_NR_CPUS][MAX_COUNTERS];

static int		event_scaled[MAX_COUNTERS];

static volatile int done = 0;
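
/*
 * The definition of struct stats is not included in this excerpt; the
 * minimal layout below is an assumption, implied by the double-precision
 * arithmetic on ->n, ->mean and ->M2 in update_stats() and stddev_stats().
 */
struct stats
{
	double n, mean, M2;
};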

/*
 * Welford's online algorithm: fold one new sample into the running
 * mean and the running sum of squared differences (M2).
 */
static void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);
}

static double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 */
static double stddev_stats(struct stats *stats)
{
	double variance = stats->M2 / (stats->n - 1);
	double variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

struct stats		event_res_stats[MAX_COUNTERS][3];
struct stats		runtime_nsecs_stats;
struct stats		walltime_nsecs_stats;
struct stats		runtime_cycles_stats;
struct stats		runtime_branches_stats;
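
/*
 * event_res_stats[counter][0..2] accumulate, across repeated runs, the
 * (scaled) count, the time the event was enabled and the time it was
 * actually running; the runtime and walltime stats feed the normalized
 * ratios (M/sec, IPC, branch-miss %) printed in the final summary.
 */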

#define MATCH_EVENT(t, c, counter)			\
	(attrs[counter].type == PERF_TYPE_##t &&	\
	 attrs[counter].config == PERF_COUNT_##c)

#define ERR_PERF_OPEN \
"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n"
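
/*
 * Open the fd(s) backing one logical counter: one fd per CPU in
 * system-wide mode, otherwise one fd per monitored thread, with
 * enable_on_exec set for forked workloads so counting starts at exec().
 * Returns the number of fds successfully created.
 */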
static int create_perf_stat_counter(int counter)
{
	struct perf_event_attr *attr = attrs + counter;
	int thread;
	int ncreated = 0;

	if (scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	if (system_wide) {
		int cpu;

		for (cpu = 0; cpu < nr_cpus; cpu++) {
			fd[cpu][counter][0] = sys_perf_event_open(attr,
					-1, cpumap[cpu], -1, 0);
			if (fd[cpu][counter][0] < 0)
				pr_debug(ERR_PERF_OPEN, counter,
					 fd[cpu][counter][0], strerror(errno));
			else
				ncreated++;
		}
	} else {
		attr->inherit = !no_inherit;
		if (target_pid == -1 && target_tid == -1) {
			attr->disabled = 1;
			attr->enable_on_exec = 1;
		}
		for (thread = 0; thread < thread_num; thread++) {
			fd[0][counter][thread] = sys_perf_event_open(attr,
				all_tids[thread], -1, -1, 0);
			if (fd[0][counter][thread] < 0)
				pr_debug(ERR_PERF_OPEN, counter,
					 fd[0][counter][thread],
					 strerror(errno));
			else
				ncreated++;
		}
	}

	return ncreated;
}

/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(int counter)
{
	if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
	    MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
		return 1;

	return 0;
}
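
/*
 * Layout note: with scaling enabled the counters were opened with
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING, so each
 * read() returns three u64 values - { value, time_enabled, time_running } -
 * which is what the count[3] arrays in read_counter() hold.
 */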

/*
 * Read out the results of a single counter:
 */
static void read_counter(int counter)
{
	u64 count[3], single_count[3];
	int cpu;
	size_t res, nv;
	int i, thread;

	count[0] = count[1] = count[2] = 0;

	nv = scale ? 3 : 1;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < thread_num; thread++) {
			if (fd[cpu][counter][thread] < 0)
				continue;

			res = read(fd[cpu][counter][thread],
					single_count, nv * sizeof(u64));
			assert(res == nv * sizeof(u64));

			close(fd[cpu][counter][thread]);
			fd[cpu][counter][thread] = -1;

			count[0] += single_count[0];
			if (scale) {
				count[1] += single_count[1];
				count[2] += single_count[2];
			}
		}
	}

	if (scale) {
		if (count[2] == 0) {
			/* The counter was never actually running. */
			event_scaled[counter] = -1;
			count[0] = 0;
			return;
		}

		if (count[2] < count[1]) {
			/* Multiplexed: scale the count up to the enabled time. */
			event_scaled[counter] = 1;
			count[0] = (unsigned long long)
				((double)count[0] * count[1] / count[2] + 0.5);
		}
	}

	for (i = 0; i < 3; i++)
		update_stats(&event_res_stats[counter][i], count[i]);

	if (verbose) {
		fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
				count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
		update_stats(&runtime_nsecs_stats, count[0]);
	if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
		update_stats(&runtime_cycles_stats, count[0]);
	if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
		update_stats(&runtime_branches_stats, count[0]);
}
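
/*
 * Measure the workload once: for a forked command, a pipe handshake makes
 * sure the (enable_on_exec) counters only start counting when the child
 * exec()s the real workload; wall-clock time is accumulated around the run
 * so print_stat() can report elapsed time and, with -r, its noise.
 */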
static int run_perf_stat(int argc __used, const char **argv)
{
	unsigned long long t0, t1;
	int status = 0;
	int counter, ncreated = 0;
	int child_ready_pipe[2], go_pipe[2];
	const bool forks = (argc > 0);
	char buf;

	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
		perror("failed to create pipes");
		exit(1);
	}

	if (forks) {
		if ((child_pid = fork()) < 0)
			perror("failed to fork");

		if (!child_pid) {
			close(child_ready_pipe[0]);
			close(go_pipe[1]);
			fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

			/*
			 * Do a dummy execvp to get the PLT entry resolved,
			 * so we avoid the resolver overhead on the real
			 * execvp call.
			 */
			execvp("", (char **)argv);

			/*
			 * Tell the parent we're ready to go
			 */
			close(child_ready_pipe[1]);

			/*
			 * Wait until the parent tells us to go.
			 */
			if (read(go_pipe[0], &buf, 1) == -1)
				perror("unable to read pipe");

			execvp(argv[0], (char **)argv);

			perror(argv[0]);
			exit(-1);
		}

		if (target_tid == -1 && target_pid == -1 && !system_wide)
			all_tids[0] = child_pid;

		/*
		 * Wait for the child to be ready to exec.
		 */
		close(child_ready_pipe[1]);
		close(go_pipe[0]);
		if (read(child_ready_pipe[0], &buf, 1) == -1)
			perror("unable to read pipe");
		close(child_ready_pipe[0]);
	}

	for (counter = 0; counter < nr_counters; counter++)
		ncreated += create_perf_stat_counter(counter);

	if (ncreated == 0) {
		pr_err("No permission to collect %sstats.\n"
		       "Consider tweaking /proc/sys/kernel/perf_event_paranoid.\n",
		       system_wide ? "system-wide " : "");
		if (child_pid != -1)
			kill(child_pid, SIGTERM);
		return -1;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();

	if (forks) {
		close(go_pipe[1]);
		wait(&status);
	} else {
		while (!done)
			sleep(1);
	}

	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	for (counter = 0; counter < nr_counters; counter++)
		read_counter(counter);

	return WEXITSTATUS(status);
}
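
/*
 * Print the +- run-to-run variation of a counter (stddev of the mean, in
 * percent); this is only meaningful when the workload is repeated with -r.
 */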
static void print_noise(int counter, double avg)
{
	if (run_count == 1)
		return;

	fprintf(stderr, " ( +- %7.3f%% )",
			100 * stddev_stats(&event_res_stats[counter][0]) / avg);
}

static void nsec_printout(int counter, double avg)
{
	double msecs = avg / 1e6;

	fprintf(stderr, " %18.6f %-24s", msecs, event_name(counter));

	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
		fprintf(stderr, " # %10.3f CPUs ",
				avg / avg_stats(&walltime_nsecs_stats));
	}
}
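
/*
 * Print a counter as a plain count plus, where a reference stat was
 * collected, a derived ratio: instructions per cycle (IPC), branch-miss
 * percentage, or events per millisecond of task time (M/sec).
 */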
static void abs_printout(int counter, double avg)
{
	double total, ratio = 0.0;

	if (big_num)
		fprintf(stderr, " %'18.0f %-24s", avg, event_name(counter));
	else
		fprintf(stderr, " %18.0f %-24s", avg, event_name(counter));

	if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
		total = avg_stats(&runtime_cycles_stats);

		if (total)
			ratio = avg / total;

		fprintf(stderr, " # %10.3f IPC ", ratio);
	} else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter) &&
			runtime_branches_stats.n != 0) {
		total = avg_stats(&runtime_branches_stats);

		if (total)
			ratio = avg * 100 / total;

		fprintf(stderr, " # %10.3f %% ", ratio);
	} else if (runtime_nsecs_stats.n != 0) {
		total = avg_stats(&runtime_nsecs_stats);

		if (total)
			ratio = 1000.0 * avg / total;

		fprintf(stderr, " # %10.3f M/sec", ratio);
	}
}

/*
 * Print out the results of a single counter:
 */
static void print_counter(int counter)
{
	double avg = avg_stats(&event_res_stats[counter][0]);
	int scaled = event_scaled[counter];

	if (scaled == -1) {
		fprintf(stderr, " %18s %-24s\n",
			"<not counted>", event_name(counter));
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(counter, avg);
	else
		abs_printout(counter, avg);

	print_noise(counter, avg);

	if (scaled) {
		double avg_enabled, avg_running;

		avg_enabled = avg_stats(&event_res_stats[counter][1]);
		avg_running = avg_stats(&event_res_stats[counter][2]);

		fprintf(stderr, " (scaled from %.2f%%)",
				100 * avg_running / avg_enabled);
	}

	fprintf(stderr, "\n");
}

static void print_stat(int argc, const char **argv)
{
	int i, counter;

	fprintf(stderr, "\n");
	fprintf(stderr, " Performance counter stats for ");
	if (target_pid == -1 && target_tid == -1) {
		fprintf(stderr, "\'%s", argv[0]);
		for (i = 1; i < argc; i++)
			fprintf(stderr, " %s", argv[i]);
	} else if (target_pid != -1)
		fprintf(stderr, "process id \'%d", target_pid);
	else
		fprintf(stderr, "thread id \'%d", target_tid);

	fprintf(stderr, "\'");
	if (run_count > 1)
		fprintf(stderr, " (%d runs)", run_count);
	fprintf(stderr, ":\n\n");

	for (counter = 0; counter < nr_counters; counter++)
		print_counter(counter);

	fprintf(stderr, "\n");
	fprintf(stderr, " %18.9f seconds time elapsed",
			avg_stats(&walltime_nsecs_stats)/1e9);
	if (run_count > 1) {
		fprintf(stderr, " ( +- %7.3f%% )",
				100*stddev_stats(&walltime_nsecs_stats) /
				avg_stats(&walltime_nsecs_stats));
	}
	fprintf(stderr, "\n\n");
}
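
/*
 * Ctrl-C handling: perf stat itself only records the signal (ending the
 * counting loop when no child was forked) and, at exit, forwards SIGTERM
 * to the child and re-raises the recorded signal with its default handler.
 */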
static volatile int signr = -1;

static void skip_signal(int signo)
{
	if (child_pid == -1)
		done = 1;

	signr = signo;
}

static void sig_atexit(void)
{
	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

static const char * const stat_usage[] = {
	"perf stat [<options>] [<command>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "stat events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('c', "scale", &scale,
		    "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - don't start any counters"),
	OPT_BOOLEAN('B', "big-num", &big_num,
		    "print large numbers with thousands\' separators"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_END()
};
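
/*
 * Illustrative invocations (workload names are placeholders), matching the
 * options defined above:
 *
 *   perf stat ./mybench                        # count the default event set
 *   perf stat -r 5 ./mybench                   # 5 runs, average +- stddev
 *   perf stat -e cycles,instructions -p 1234   # attach to an existing pid
 *   perf stat -a -C 0,1 sleep 1                # system-wide on CPUs 0 and 1
 */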
int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
	int status;
	int i, j;

	setlocale(LC_ALL, "");

	argc = parse_options(argc, argv, options, stat_usage,
		PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target_pid == -1 && target_tid == -1)
		usage_with_options(stat_usage, options);
	if (run_count <= 0)
		usage_with_options(stat_usage, options);

	/* Set attrs and nr_counters if no event is selected and !null_run */
	if (!null_run && !nr_counters) {
		memcpy(attrs, default_attrs, sizeof(default_attrs));
		nr_counters = ARRAY_SIZE(default_attrs);
	}

	if (system_wide)
		nr_cpus = read_cpu_map(cpu_list);
	else
		nr_cpus = 1;

	if (nr_cpus < 1)
		usage_with_options(stat_usage, options);

	if (target_pid != -1) {
		target_tid = target_pid;
		thread_num = find_all_tid(target_pid, &all_tids);
		if (thread_num <= 0) {
			fprintf(stderr, "Can't find all threads of pid %d\n",
				target_pid);
			usage_with_options(stat_usage, options);
		}
	} else {
		all_tids = malloc(sizeof(pid_t));
		if (!all_tids)
			return -ENOMEM;

		all_tids[0] = target_tid;
		thread_num = 1;
	}

	for (i = 0; i < MAX_NR_CPUS; i++) {
		for (j = 0; j < MAX_COUNTERS; j++) {
			fd[i][j] = malloc(sizeof(int) * thread_num);
			if (!fd[i][j])
				return -ENOMEM;
		}
	}

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	signal(SIGINT,  skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(stderr, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);
		status = run_perf_stat(argc, argv);
	}

	if (status != -1)
		print_stat(argc, argv);

	return status;
}