/*
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util/symbol.h"
#include "util/color.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>

#include <linux/unistd.h>
#include <linux/types.h>
static int            fd[MAX_NR_CPUS][MAX_COUNTERS];

static int            system_wide       =      0;

static int            default_interval  = 100000;

static u64            count_filter      =      5;
static int            print_entries     =     15;

static int            target_pid        =     -1;
static int            profile_cpu       =     -1;
static int            nr_cpus           =      0;
static unsigned int   realtime_prio     =      0;
static int            group             =      0;
static unsigned int   page_size;
static unsigned int   mmap_pages        =     16;
static int            freq              =      0;
static int            verbose           =      0;
static char           *vmlinux          =   NULL;

static char           *sym_filter;
static unsigned long  filter_start;
static unsigned long  filter_end;

static int            delay_secs        =      2;
static int            zero;
static int            dump_symtab;

static u64            min_ip;
static u64            max_ip            =   -1ll;
struct sym_entry {
        struct rb_node          rb_node;
        struct list_head        node;
        unsigned long           count[MAX_COUNTERS];
        unsigned long           snap_count;
        double                  weight;
        int                     skip;
};

struct sym_entry      *sym_filter_entry;

struct dso            *kernel_dso;
/*
 * Symbols are added here in record_ip() and get removed again once
 * their counts have decayed to zero:
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
        double weight = sym->snap_count;
        int counter;

        for (counter = 1; counter < nr_counters-1; counter++)
                weight *= sym->count[counter];

        weight /= (sym->count[counter] + 1);

        return weight;
}
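/*
 * Worked example (illustrative numbers): with nr_counters == 3 and
 * counts { snap_count = 400, count[1] = 20, count[2] = 4 }, the loop
 * only runs for counter == 1, so weight = 400 * 20 / (4 + 1) = 1600.
 * The last counter always divides, so it should be the event you want
 * to weigh against; the "+ 1" avoids dividing by zero when that
 * counter has no hits yet.
 */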
static long           samples;
static long           userspace_samples;
static const char     CONSOLE_CLEAR[] = "\e[H\e[2J";
static void __list_insert_active_sym(struct sym_entry *syme)
{
        list_add(&syme->node, &active_symbols);
}
static void list_remove_active_sym(struct sym_entry *syme)
{
        pthread_mutex_lock(&active_symbols_lock);
        list_del_init(&syme->node);
        pthread_mutex_unlock(&active_symbols_lock);
}
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
        struct rb_node **p = &tree->rb_node;
        struct rb_node *parent = NULL;
        struct sym_entry *iter;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct sym_entry, rb_node);

                if (se->weight > iter->weight)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&se->rb_node, parent, p);
        rb_insert_color(&se->rb_node, tree);
}
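/*
 * Heavier entries go to the left, so an in-order walk of the tree
 * (rb_first(), then rb_next()) visits symbols in descending weight
 * order -- exactly the order print_sym_table() wants to display them.
 */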
static void print_sym_table(void)
{
        int printed = 0, j;
        int counter;
        float samples_per_sec = samples/delay_secs;
        float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
        float sum_ksamples = 0.0;
        struct sym_entry *syme, *n;
        struct rb_root tmp = RB_ROOT;
        struct rb_node *nd;

        samples = userspace_samples = 0;

        /* Sort the active symbols */
        pthread_mutex_lock(&active_symbols_lock);
        syme = list_entry(active_symbols.next, struct sym_entry, node);
        pthread_mutex_unlock(&active_symbols_lock);

        list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
                syme->snap_count = syme->count[0];
                if (syme->snap_count != 0) {
                        syme->weight = sym_weight(syme);
                        rb_insert_active_sym(&tmp, syme);
                        sum_ksamples += syme->snap_count;

                        /*
                         * Decay the counts by 1/8th per refresh so that
                         * old activity fades out instead of dominating
                         * the display forever (-z resets them outright):
                         */
                        for (j = 0; j < nr_counters; j++)
                                syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
                } else
                        list_remove_active_sym(syme);
        }

        puts(CONSOLE_CLEAR);

        printf(
"------------------------------------------------------------------------------\n");
        printf( "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% [",
                samples_per_sec,
                100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));

        if (nr_counters == 1) {
                printf("%Ld", (u64)attrs[0].sample_period);
                if (freq)
                        printf("Hz ");
                else
                        printf(" ");
        }

        for (counter = 0; counter < nr_counters; counter++) {
                if (counter)
                        printf("/");

                printf("%s", event_name(counter));
        }

        printf( "], ");

        if (target_pid != -1)
                printf(" (target_pid: %d", target_pid);
        else
                printf(" (all");

        if (profile_cpu != -1)
                printf(", cpu: %d)\n", profile_cpu);
        else {
                if (target_pid != -1)
                        printf(")\n");
                else
                        printf(", %d CPUs)\n", nr_cpus);
        }

        printf("------------------------------------------------------------------------------\n\n");

        if (nr_counters == 1)
                printf("             samples    pcnt");
        else
                printf("  weight     samples    pcnt");

        printf("         RIP          kernel function\n"
               "  ______     _______   _____   ________________   _______________\n\n");

        for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
                struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
                struct symbol *sym = (struct symbol *)(syme + 1);
                double pcnt;

                if (++printed > print_entries || syme->snap_count < count_filter)
                        continue;

                pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
                                         sum_ksamples));

                if (nr_counters == 1)
                        printf("%20.2f - ", syme->weight);
                else
                        printf("%9.1f %10ld - ", syme->weight, syme->snap_count);

                percent_color_fprintf(stdout, "%4.1f%%", pcnt);
                printf(" - %016llx : %s", sym->start, sym->name);
                if (sym->module)
                        printf("\t[%s]", sym->module->name);
                printf("\n");
        }
}
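/*
 * A single-counter output line then looks like this (values are
 * illustrative only):
 *
 *              1295.00 -  4.2% - ffffffff810473f0 : schedule
 */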
static void *display_thread(void *arg __used)
{
        struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
        int delay_msecs = delay_secs * 1000;

        printf("PerfTop refresh period: %d seconds\n", delay_secs);

        /*
         * Redraw the table every delay_secs: poll() returning 0 means
         * the timeout expired with no input on stdin, so keep going.
         */
        do {
                print_sym_table();
        } while (poll(&stdin_poll, 1, delay_msecs) == 0);

        printf("key pressed - exiting.\n");
        exit(0);

        return NULL;
}
/*
 * Tag samples to be skipped: mostly idle routines, which would
 * otherwise dominate almost any profile.
 */
static const char *skip_symbols[] = {
        "ppc64_runlatch_off",
        "pseries_dedicated_idle_sleep",
        NULL
};
static int symbol_filter(struct dso *self, struct symbol *sym)
{
        static int filter_match;
        struct sym_entry *syme;
        const char *name = sym->name;
        int i;

        /*
         * ppc64 uses function descriptors and prepends a '.' to the
         * start of every instruction address. Remove it.
         */
        if (name[0] == '.')
                name++;

        if (!strcmp(name, "_text") ||
            !strcmp(name, "_etext") ||
            !strcmp(name, "_sinittext") ||
            !strncmp("init_module", name, 11) ||
            !strncmp("cleanup_module", name, 14) ||
            strstr(name, "_text_start") ||
            strstr(name, "_text_end"))
                return 1;

        syme = dso__sym_priv(self, sym);
        for (i = 0; skip_symbols[i]; i++) {
                if (!strcmp(skip_symbols[i], name)) {
                        syme->skip = 1;
                        break;
                }
        }

        /*
         * -s <sym> filtering: filter_match goes 0 -> 1 when the filter
         * symbol is seen, and the start of the *next* symbol then
         * terminates the filtered address range:
         */
        if (filter_match == 1) {
                filter_end = sym->start;
                filter_match = -1;
                if (filter_end - filter_start > 10000) {
                        fprintf(stderr,
                                "hm, too large filter symbol <%s> - skipping.\n",
                                sym_filter);
                        fprintf(stderr, "symbol filter start: %016lx\n",
                                        filter_start);
                        fprintf(stderr, "                end: %016lx\n",
                                        filter_end);
                        filter_end = filter_start = 0;
                        sym_filter = NULL;
                        sleep(1);
                }
        }

        if (filter_match == 0 && sym_filter && !strcmp(name, sym_filter)) {
                filter_match = 1;
                filter_start = sym->start;
        }

        return 0;
}
static int parse_symbols(void)
{
        struct rb_node *node;
        struct symbol *sym;
        int modules = vmlinux ? 1 : 0;

        kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
        if (kernel_dso == NULL)
                return -1;

        if (dso__load_kernel(kernel_dso, vmlinux, symbol_filter, verbose, modules) <= 0)
                goto out_delete_dso;

        node = rb_first(&kernel_dso->syms);
        sym = rb_entry(node, struct symbol, rb_node);
        min_ip = sym->start;

        node = rb_last(&kernel_dso->syms);
        sym = rb_entry(node, struct symbol, rb_node);
        max_ip = sym->end;

        if (dump_symtab)
                dso__fprintf(kernel_dso, stderr);

        return 0;

out_delete_dso:
        dso__delete(kernel_dso);
        kernel_dso = NULL;
        return -1;
}
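/*
 * kernel_dso->syms is an rbtree keyed on symbol start address, so
 * rb_first()/rb_last() hand back the lowest and highest kernel text
 * symbols, which is where the min_ip/max_ip bounds above come from.
 */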
#define TRACE_COUNT     3

/*
 * Look up the symbol containing this ip in kernel_dso and record the hit:
 */
static void record_ip(u64 ip, int counter)
{
        struct symbol *sym = dso__find_symbol(kernel_dso, ip);

        if (sym != NULL) {
                struct sym_entry *syme = dso__sym_priv(kernel_dso, sym);

                if (!syme->skip) {
                        syme->count[counter]++;
                        pthread_mutex_lock(&active_symbols_lock);
                        if (list_empty(&syme->node) || !syme->node.next)
                                __list_insert_active_sym(syme);
                        pthread_mutex_unlock(&active_symbols_lock);
                        return;
                }
        }

        /* skipped or unresolvable symbol: don't count this sample */
        samples--;
}
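/*
 * Note on the insertion test above: a sym_entry that has never been on
 * the active list presumably has node.next == NULL (zeroed symbol priv
 * area), while one that was removed is list_del_init()'ed back to an
 * empty list node -- checking both conditions covers both states.
 */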
static void process_event(u64 ip, int counter, int user)
{
        samples++;

        if (user) {
                userspace_samples++;
                return;
        }

        record_ip(ip, counter);
}
struct mmap_data {
        int             counter;
        void            *base;
        unsigned int    mask;
        unsigned int    prev;
};

static unsigned int mmap_read_head(struct mmap_data *md)
{
        struct perf_counter_mmap_page *pc = md->base;
        int head;

        head = pc->data_head;
        rmb();

        return head;
}
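/*
 * data_head is advanced by the kernel as it writes events into the
 * ring buffer; the rmb() above ensures the head is read before any of
 * the event data it covers, so we never parse partially written bytes.
 */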
static struct timeval last_read, this_read;

static void mmap_read_counter(struct mmap_data *md)
{
        unsigned int head = mmap_read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        int diff;

        gettimeofday(&this_read, NULL);

        /*
         * If we're further behind than half the buffer, there's a chance
         * the writer will bite our tail and mess up the samples under us.
         *
         * If we somehow ended up ahead of the head, we got messed up.
         *
         * In either case, truncate and restart at head.
         */
        diff = head - old;
        if (diff > md->mask / 2 || diff < 0) {
                struct timeval iv;
                unsigned long msecs;

                timersub(&this_read, &last_read, &iv);
                msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

                fprintf(stderr, "WARNING: failed to keep up with mmap data."
                                "  Last read %lu msecs ago.\n", msecs);

                /*
                 * head points to a known good entry, start there.
                 */
                old = head;
        }

        last_read = this_read;

        for (; old != head;) {
                struct ip_event {
                        struct perf_event_header header;
                        u64 ip;
                        u32 pid, target_pid;
                };
                struct mmap_event {
                        struct perf_event_header header;
                        u32 pid, target_pid;
                        u64 start;
                        u64 len;
                        u64 pgoff;
                        char filename[PATH_MAX];
                };

                typedef union event_union {
                        struct perf_event_header header;
                        struct ip_event ip;
                        struct mmap_event mmap;
                } event_t;

                event_t *event = (event_t *)&data[old & md->mask];

                event_t event_copy;

                size_t size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = &event_copy;
                }

                old += size;

                if (event->header.type == PERF_EVENT_SAMPLE) {
                        int user =
        (event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER;

                        process_event(event->ip.ip, md->counter, user);
                }
        }

        md->prev = old;
}
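/*
 * Example of the straddle case (illustrative numbers): with a 64KiB
 * buffer (mask 0xffff), old & mask == 0xfff8 and size == 16, the event
 * occupies bytes 0xfff8-0xffff plus 0x0000-0x0007 after the wrap.  The
 * do/while above copies it in two chunks (8 bytes, then 8 bytes) into
 * event_copy so it can be parsed contiguously.
 */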
static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

static void mmap_read(void)
{
        int i, counter;

        for (i = 0; i < nr_cpus; i++) {
                for (counter = 0; counter < nr_counters; counter++)
                        mmap_read_counter(&mmap_array[i][counter]);
        }
}

static int nr_poll;
static int group_fd;
static void start_counter(int i, int counter)
{
        struct perf_counter_attr *attr;
        unsigned int cpu;

        cpu = profile_cpu;
        if (target_pid == -1 && profile_cpu == -1)
                cpu = i;

        attr = attrs + counter;

        attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
        attr->freq        = freq;

try_again:
        fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);

        if (fd[i][counter] < 0) {
                int err = errno;

                if (err == EPERM)
                        die("No permission - are you root?\n");
                /*
                 * If it's cycles then fall back to hrtimer
                 * based cpu-clock-tick sw counter, which
                 * is always available even if no PMU support:
                 */
                if (attr->type == PERF_TYPE_HARDWARE
                        && attr->config == PERF_COUNT_HW_CPU_CYCLES) {

                        if (verbose)
                                warning(" ... trying to fall back to cpu-clock-ticks\n");

                        attr->type = PERF_TYPE_SOFTWARE;
                        attr->config = PERF_COUNT_SW_CPU_CLOCK;
                        goto try_again;
                }
                error("perfcounter syscall returned with %d (%s)\n",
                        fd[i][counter], strerror(err));
                die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
        }
        assert(fd[i][counter] >= 0);
        fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

        /*
         * First counter acts as the group leader:
         */
        if (group && group_fd == -1)
                group_fd = fd[i][counter];

        event_array[nr_poll].fd = fd[i][counter];
        event_array[nr_poll].events = POLLIN;
        nr_poll++;

        mmap_array[i][counter].counter = counter;
        mmap_array[i][counter].prev = 0;
        mmap_array[i][counter].mask = mmap_pages*page_size - 1;
        mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
                        PROT_READ, MAP_SHARED, fd[i][counter], 0);
        if (mmap_array[i][counter].base == MAP_FAILED)
                die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
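/*
 * The mapping is mmap_pages data pages plus one leading metadata page
 * (struct perf_counter_mmap_page), which is why mmap_read_counter()
 * adds page_size to md->base before indexing.  For the "& mask"
 * arithmetic to work, mmap_pages * page_size must be a power of two,
 * so -m values should be powers of two as well.
 */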
static int __cmd_top(void)
{
        pthread_t thread;
        int i, counter;
        int ret;

        for (i = 0; i < nr_cpus; i++) {
                group_fd = -1;
                for (counter = 0; counter < nr_counters; counter++)
                        start_counter(i, counter);
        }

        /* Wait for a minimal set of events before starting the snapshot */
        poll(event_array, nr_poll, 100);

        mmap_read();

        if (pthread_create(&thread, NULL, display_thread, NULL)) {
                printf("Could not create display thread.\n");
                exit(-1);
        }

        if (realtime_prio) {
                struct sched_param param;

                param.sched_priority = realtime_prio;
                if (sched_setscheduler(0, SCHED_FIFO, &param)) {
                        printf("Could not set realtime priority.\n");
                        exit(-1);
                }
        }

        while (1) {
                int hits = samples;

                mmap_read();

                if (hits == samples)
                        ret = poll(event_array, nr_poll, 100);
        }

        return 0;
}
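/*
 * Note the shape of the event loop above: mmap_read() is called
 * back-to-back while samples keep arriving, and the 100ms poll() is
 * only entered when a pass found nothing new (hits == samples),
 * trading a little CPU for low sample-processing latency.
 */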
static const char * const top_usage[] = {
        "perf top [<options>]",
        NULL
};

static const struct option options[] = {
        OPT_CALLBACK('e', "event", NULL, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events),
        OPT_INTEGER('c', "count", &default_interval,
                    "event period to sample"),
        OPT_INTEGER('p', "pid", &target_pid,
                    "profile events on existing pid"),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_INTEGER('C', "CPU", &profile_cpu,
                    "CPU to profile on"),
        OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
        OPT_INTEGER('m', "mmap-pages", &mmap_pages,
                    "number of mmap data pages"),
        OPT_INTEGER('r', "realtime", &realtime_prio,
                    "collect data with this RT SCHED_FIFO priority"),
        OPT_INTEGER('d', "delay", &delay_secs,
                    "number of seconds to delay between refreshes"),
        OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
                    "dump the symbol table used for profiling"),
        OPT_INTEGER('f', "count-filter", &count_filter,
                    "only display functions with more events than this"),
        OPT_BOOLEAN('g', "group", &group,
                    "put the counters into a counter group"),
        OPT_STRING('s', "sym-filter", &sym_filter, "pattern",
                   "only display symbols matching this pattern"),
        OPT_BOOLEAN('z', "zero", &zero,
                    "zero history across updates"),
        OPT_INTEGER('F', "freq", &freq,
                    "profile at this frequency"),
        OPT_INTEGER('E', "entries", &print_entries,
                    "display this many functions"),
        OPT_BOOLEAN('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
        OPT_END()
};
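/*
 * Typical invocations (illustrative):
 *
 *   perf top                      # profile cycles on all CPUs
 *   perf top -p 1234 -d 5         # profile one pid, refresh every 5s
 *   perf top -e cycles -c 50000   # explicit event and sample period
 */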
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
        int counter;

        page_size = sysconf(_SC_PAGE_SIZE);

        argc = parse_options(argc, argv, options, top_usage, 0);
        if (argc)
                usage_with_options(top_usage, options);

        if (freq) {
                default_interval = freq;
                freq = 1;
        }

        /* CPU and PID are mutually exclusive */
        if (target_pid != -1 && profile_cpu != -1) {
                printf("WARNING: PID switch overriding CPU\n");
                sleep(1);
                profile_cpu = -1;
        }

        if (!nr_counters)
                nr_counters = 1;

        /*
         * Fill in the ones not specifically initialized via -c:
         */
        for (counter = 0; counter < nr_counters; counter++) {
                if (attrs[counter].sample_period)
                        continue;

                attrs[counter].sample_period = default_interval;
        }

        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        assert(nr_cpus <= MAX_NR_CPUS);
        assert(nr_cpus >= 0);

        if (target_pid != -1 || profile_cpu != -1)
                nr_cpus = 1;

        parse_symbols();

        return __cmd_top();
}
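/*
 * Note on the -F handling above: the requested frequency lands in
 * default_interval (and from there in attrs[i].sample_period), while
 * freq itself is reduced to a boolean that start_counter() copies into
 * attr->freq, switching the kernel from fixed-period to fixed-frequency
 * sampling.
 */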