4 * Builtin top command: Display a continuously updated profile of
5 * any workload, CPU or specific PID.
7 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
9 * Improvements and fixes by:
11 * Arjan van de Ven <arjan@linux.intel.com>
12 * Yanmin Zhang <yanmin.zhang@intel.com>
13 * Wu Fengguang <fengguang.wu@intel.com>
14 * Mike Galbraith <efault@gmx.de>
15 * Paul Mackerras <paulus@samba.org>
17 * Released under the GPL v2. (and only v2, not any later version)
23 #include "util/annotate.h"
24 #include "util/cache.h"
25 #include "util/color.h"
26 #include "util/evlist.h"
27 #include "util/evsel.h"
28 #include "util/session.h"
29 #include "util/symbol.h"
30 #include "util/thread.h"
31 #include "util/thread_map.h"
33 #include "util/util.h"
34 #include <linux/rbtree.h>
35 #include "util/parse-options.h"
36 #include "util/parse-events.h"
37 #include "util/cpumap.h"
38 #include "util/xyarray.h"
40 #include "util/debug.h"
54 #include <sys/syscall.h>
55 #include <sys/ioctl.h>
57 #include <sys/prctl.h>
62 #include <linux/unistd.h>
63 #include <linux/types.h>
65 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
67 static struct perf_top top
= {
70 .display_weighted
= -1,
73 .active_symbols
= LIST_HEAD_INIT(top
.active_symbols
),
74 .active_symbols_lock
= PTHREAD_MUTEX_INITIALIZER
,
75 .active_symbols_cond
= PTHREAD_COND_INITIALIZER
,
76 .freq
= 1000, /* 1 KHz */
79 static bool system_wide
= false;
81 static bool use_tui
, use_stdio
;
83 static int default_interval
= 0;
85 static bool inherit
= false;
86 static int realtime_prio
= 0;
87 static bool group
= false;
88 static unsigned int page_size
;
89 static unsigned int mmap_pages
= 128;
91 static bool dump_symtab
= false;
93 static struct winsize winsize
;
95 static const char *sym_filter
= NULL
;
96 struct sym_entry
*sym_filter_entry_sched
= NULL
;
97 static int sym_pcnt_filter
= 5;
/*
 * Fill *ws with the terminal dimensions.  Order of preference:
 * the LINES/COLUMNS environment variables, then the TIOCGWINSZ
 * ioctl on stdout, and finally a hardcoded 25x80 fallback.
 */
void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			/* Both env vars yielded non-zero values: done. */
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	/* Last resort: classic terminal geometry. */
	ws->ws_row = 25;
	ws->ws_col = 80;
}
125 static void update_print_entries(struct winsize
*ws
)
127 top
.print_entries
= ws
->ws_row
;
129 if (top
.print_entries
> 9)
130 top
.print_entries
-= 9;
133 static void sig_winch_handler(int sig __used
)
135 get_term_dimensions(&winsize
);
136 update_print_entries(&winsize
);
139 static int parse_source(struct sym_entry
*syme
)
142 struct annotation
*notes
;
149 sym
= sym_entry__symbol(syme
);
153 * We can't annotate with just /proc/kallsyms
155 if (map
->dso
->symtab_type
== SYMTAB__KALLSYMS
) {
156 pr_err("Can't annotate %s: No vmlinux file was found in the "
157 "path\n", sym
->name
);
162 notes
= symbol__annotation(sym
);
163 if (notes
->src
!= NULL
) {
164 pthread_mutex_lock(¬es
->lock
);
168 pthread_mutex_lock(¬es
->lock
);
170 if (symbol__alloc_hist(sym
, top
.evlist
->nr_entries
) < 0) {
171 pthread_mutex_unlock(¬es
->lock
);
172 pr_err("Not enough memory for annotating '%s' symbol!\n",
178 err
= symbol__annotate(sym
, syme
->map
, 0);
181 top
.sym_filter_entry
= syme
;
184 pthread_mutex_unlock(¬es
->lock
);
/* Reset the annotation histograms of @syme's symbol. */
static void __zero_source_counters(struct sym_entry *syme)
{
	struct symbol *sym = sym_entry__symbol(syme);
	symbol__annotate_zero_histograms(sym);
}
194 static void record_precise_ip(struct sym_entry
*syme
, int counter
, u64 ip
)
196 struct annotation
*notes
;
199 if (syme
!= top
.sym_filter_entry
)
202 sym
= sym_entry__symbol(syme
);
203 notes
= symbol__annotation(sym
);
205 if (pthread_mutex_trylock(¬es
->lock
))
208 ip
= syme
->map
->map_ip(syme
->map
, ip
);
209 symbol__inc_addr_samples(sym
, syme
->map
, counter
, ip
);
211 pthread_mutex_unlock(¬es
->lock
);
214 static void show_details(struct sym_entry
*syme
)
216 struct annotation
*notes
;
217 struct symbol
*symbol
;
223 symbol
= sym_entry__symbol(syme
);
224 notes
= symbol__annotation(symbol
);
226 pthread_mutex_lock(¬es
->lock
);
228 if (notes
->src
== NULL
)
231 printf("Showing %s for %s\n", event_name(top
.sym_evsel
), symbol
->name
);
232 printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter
);
234 more
= symbol__annotate_printf(symbol
, syme
->map
, top
.sym_evsel
->idx
,
235 0, sym_pcnt_filter
, top
.print_entries
, 4);
237 symbol__annotate_zero_histogram(symbol
, top
.sym_evsel
->idx
);
239 symbol__annotate_decay_histogram(symbol
, top
.sym_evsel
->idx
);
241 printf("%d lines not displayed, maybe increase display entries [e]\n", more
);
243 pthread_mutex_unlock(¬es
->lock
);
246 static const char CONSOLE_CLEAR
[] = "\e[H\e[2J";
248 static void __list_insert_active_sym(struct sym_entry
*syme
)
250 list_add(&syme
->node
, &top
.active_symbols
);
253 static void print_sym_table(struct perf_session
*session
)
258 struct sym_entry
*syme
;
259 struct rb_root tmp
= RB_ROOT
;
260 const int win_width
= winsize
.ws_col
- 1;
261 int sym_width
, dso_width
, dso_short_width
;
262 float sum_ksamples
= perf_top__decay_samples(&top
, &tmp
);
266 perf_top__header_snprintf(&top
, bf
, sizeof(bf
));
269 perf_top__reset_sample_counters(&top
);
271 printf("%-*.*s\n", win_width
, win_width
, graph_dotted_line
);
273 if (session
->hists
.stats
.total_lost
!= 0) {
274 color_fprintf(stdout
, PERF_COLOR_RED
, "WARNING:");
275 printf(" LOST %" PRIu64
" events, Check IO/CPU overload\n",
276 session
->hists
.stats
.total_lost
);
279 if (top
.sym_filter_entry
) {
280 show_details(top
.sym_filter_entry
);
284 perf_top__find_widths(&top
, &tmp
, &dso_width
, &dso_short_width
,
287 if (sym_width
+ dso_width
> winsize
.ws_col
- 29) {
288 dso_width
= dso_short_width
;
289 if (sym_width
+ dso_width
> winsize
.ws_col
- 29)
290 sym_width
= winsize
.ws_col
- dso_width
- 29;
293 if (top
.evlist
->nr_entries
== 1)
294 printf(" samples pcnt");
296 printf(" weight samples pcnt");
300 printf(" %-*.*s DSO\n", sym_width
, sym_width
, "function");
301 printf(" %s _______ _____",
302 top
.evlist
->nr_entries
== 1 ? " " : "______");
304 printf(" ________________");
305 printf(" %-*.*s", sym_width
, sym_width
, graph_line
);
306 printf(" %-*.*s", dso_width
, dso_width
, graph_line
);
309 for (nd
= rb_first(&tmp
); nd
; nd
= rb_next(nd
)) {
313 syme
= rb_entry(nd
, struct sym_entry
, rb_node
);
314 sym
= sym_entry__symbol(syme
);
315 if (++printed
> top
.print_entries
||
316 (int)syme
->snap_count
< top
.count_filter
)
319 pcnt
= 100.0 - (100.0 * ((sum_ksamples
- syme
->snap_count
) /
322 if (top
.evlist
->nr_entries
== 1 || !top
.display_weighted
)
323 printf("%20.2f ", syme
->weight
);
325 printf("%9.1f %10ld ", syme
->weight
, syme
->snap_count
);
327 percent_color_fprintf(stdout
, "%4.1f%%", pcnt
);
329 printf(" %016" PRIx64
, sym
->start
);
330 printf(" %-*.*s", sym_width
, sym_width
, sym
->name
);
331 printf(" %-*.*s\n", dso_width
, dso_width
,
332 dso_width
>= syme
->map
->dso
->long_name_len
?
333 syme
->map
->dso
->long_name
:
334 syme
->map
->dso
->short_name
);
338 static void prompt_integer(int *target
, const char *msg
)
340 char *buf
= malloc(0), *p
;
344 fprintf(stdout
, "\n%s: ", msg
);
345 if (getline(&buf
, &dummy
, stdin
) < 0)
348 p
= strchr(buf
, '\n');
358 tmp
= strtoul(buf
, NULL
, 10);
/*
 * Prompt the user for a percentage and store it in *target; values
 * outside [0, 100] are silently ignored, leaving *target unchanged.
 */
static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}
373 static void prompt_symbol(struct sym_entry
**target
, const char *msg
)
375 char *buf
= malloc(0), *p
;
376 struct sym_entry
*syme
= *target
, *n
, *found
= NULL
;
379 /* zero counters of active symbol */
381 __zero_source_counters(syme
);
385 fprintf(stdout
, "\n%s: ", msg
);
386 if (getline(&buf
, &dummy
, stdin
) < 0)
389 p
= strchr(buf
, '\n');
393 pthread_mutex_lock(&top
.active_symbols_lock
);
394 syme
= list_entry(top
.active_symbols
.next
, struct sym_entry
, node
);
395 pthread_mutex_unlock(&top
.active_symbols_lock
);
397 list_for_each_entry_safe_from(syme
, n
, &top
.active_symbols
, node
) {
398 struct symbol
*sym
= sym_entry__symbol(syme
);
400 if (!strcmp(buf
, sym
->name
)) {
407 fprintf(stderr
, "Sorry, %s is not active.\n", buf
);
417 static void print_mapped_keys(void)
421 if (top
.sym_filter_entry
) {
422 struct symbol
*sym
= sym_entry__symbol(top
.sym_filter_entry
);
426 fprintf(stdout
, "\nMapped keys:\n");
427 fprintf(stdout
, "\t[d] display refresh delay. \t(%d)\n", top
.delay_secs
);
428 fprintf(stdout
, "\t[e] display entries (lines). \t(%d)\n", top
.print_entries
);
430 if (top
.evlist
->nr_entries
> 1)
431 fprintf(stdout
, "\t[E] active event counter. \t(%s)\n", event_name(top
.sym_evsel
));
433 fprintf(stdout
, "\t[f] profile display filter (count). \t(%d)\n", top
.count_filter
);
435 fprintf(stdout
, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter
);
436 fprintf(stdout
, "\t[s] annotate symbol. \t(%s)\n", name
?: "NULL");
437 fprintf(stdout
, "\t[S] stop annotation.\n");
439 if (top
.evlist
->nr_entries
> 1)
440 fprintf(stdout
, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", top
.display_weighted
? 1 : 0);
443 "\t[K] hide kernel_symbols symbols. \t(%s)\n",
444 top
.hide_kernel_symbols
? "yes" : "no");
446 "\t[U] hide user symbols. \t(%s)\n",
447 top
.hide_user_symbols
? "yes" : "no");
448 fprintf(stdout
, "\t[z] toggle sample zeroing. \t(%d)\n", top
.zero
? 1 : 0);
449 fprintf(stdout
, "\t[qQ] quit.\n");
452 static int key_mapped(int c
)
469 return top
.evlist
->nr_entries
> 1 ? 1 : 0;
477 static void handle_keypress(struct perf_session
*session
, int c
)
479 if (!key_mapped(c
)) {
480 struct pollfd stdin_poll
= { .fd
= 0, .events
= POLLIN
};
481 struct termios tc
, save
;
484 fprintf(stdout
, "\nEnter selection, or unmapped key to continue: ");
489 tc
.c_lflag
&= ~(ICANON
| ECHO
);
492 tcsetattr(0, TCSANOW
, &tc
);
494 poll(&stdin_poll
, 1, -1);
497 tcsetattr(0, TCSAFLUSH
, &save
);
504 prompt_integer(&top
.delay_secs
, "Enter display delay");
505 if (top
.delay_secs
< 1)
509 prompt_integer(&top
.print_entries
, "Enter display entries (lines)");
510 if (top
.print_entries
== 0) {
511 sig_winch_handler(SIGWINCH
);
512 signal(SIGWINCH
, sig_winch_handler
);
514 signal(SIGWINCH
, SIG_DFL
);
517 if (top
.evlist
->nr_entries
> 1) {
518 /* Select 0 as the default event: */
521 fprintf(stderr
, "\nAvailable events:");
523 list_for_each_entry(top
.sym_evsel
, &top
.evlist
->entries
, node
)
524 fprintf(stderr
, "\n\t%d %s", top
.sym_evsel
->idx
, event_name(top
.sym_evsel
));
526 prompt_integer(&counter
, "Enter details event counter");
528 if (counter
>= top
.evlist
->nr_entries
) {
529 top
.sym_evsel
= list_entry(top
.evlist
->entries
.next
, struct perf_evsel
, node
);
530 fprintf(stderr
, "Sorry, no such event, using %s.\n", event_name(top
.sym_evsel
));
534 list_for_each_entry(top
.sym_evsel
, &top
.evlist
->entries
, node
)
535 if (top
.sym_evsel
->idx
== counter
)
538 top
.sym_evsel
= list_entry(top
.evlist
->entries
.next
, struct perf_evsel
, node
);
541 prompt_integer(&top
.count_filter
, "Enter display event count filter");
544 prompt_percent(&sym_pcnt_filter
, "Enter details display event filter (percent)");
547 top
.hide_kernel_symbols
= !top
.hide_kernel_symbols
;
551 printf("exiting.\n");
553 perf_session__fprintf_dsos(session
, stderr
);
556 prompt_symbol(&top
.sym_filter_entry
, "Enter details symbol");
559 if (!top
.sym_filter_entry
)
562 struct sym_entry
*syme
= top
.sym_filter_entry
;
564 top
.sym_filter_entry
= NULL
;
565 __zero_source_counters(syme
);
569 top
.hide_user_symbols
= !top
.hide_user_symbols
;
572 top
.display_weighted
= ~top
.display_weighted
;
575 top
.zero
= !top
.zero
;
582 static void *display_thread_tui(void *arg __used
)
585 pthread_mutex_lock(&top
.active_symbols_lock
);
586 while (list_empty(&top
.active_symbols
)) {
587 err
= pthread_cond_wait(&top
.active_symbols_cond
,
588 &top
.active_symbols_lock
);
592 pthread_mutex_unlock(&top
.active_symbols_lock
);
594 perf_top__tui_browser(&top
);
600 static void *display_thread(void *arg __used
)
602 struct pollfd stdin_poll
= { .fd
= 0, .events
= POLLIN
};
603 struct termios tc
, save
;
605 struct perf_session
*session
= (struct perf_session
*) arg
;
609 tc
.c_lflag
&= ~(ICANON
| ECHO
);
614 delay_msecs
= top
.delay_secs
* 1000;
615 tcsetattr(0, TCSANOW
, &tc
);
620 print_sym_table(session
);
621 } while (!poll(&stdin_poll
, 1, delay_msecs
) == 1);
624 tcsetattr(0, TCSAFLUSH
, &save
);
626 handle_keypress(session
, c
);
632 /* Tag samples to be skipped. */
633 static const char *skip_symbols
[] = {
640 "mwait_idle_with_hints",
642 "ppc64_runlatch_off",
643 "pseries_dedicated_idle_sleep",
647 static int symbol_filter(struct map
*map
, struct symbol
*sym
)
649 struct sym_entry
*syme
;
650 const char *name
= sym
->name
;
654 * ppc64 uses function descriptors and appends a '.' to the
655 * start of every instruction address. Remove it.
660 if (!strcmp(name
, "_text") ||
661 !strcmp(name
, "_etext") ||
662 !strcmp(name
, "_sinittext") ||
663 !strncmp("init_module", name
, 11) ||
664 !strncmp("cleanup_module", name
, 14) ||
665 strstr(name
, "_text_start") ||
666 strstr(name
, "_text_end"))
669 syme
= symbol__priv(sym
);
671 symbol__annotate_init(map
, sym
);
673 if (!top
.sym_filter_entry
&& sym_filter
&& !strcmp(name
, sym_filter
)) {
674 /* schedule initial sym_filter_entry setup */
675 sym_filter_entry_sched
= syme
;
679 for (i
= 0; skip_symbols
[i
]; i
++) {
680 if (!strcmp(skip_symbols
[i
], name
)) {
689 static void perf_event__process_sample(const union perf_event
*event
,
690 struct perf_sample
*sample
,
691 struct perf_session
*session
)
693 u64 ip
= event
->ip
.ip
;
694 struct sym_entry
*syme
;
695 struct addr_location al
;
696 struct machine
*machine
;
697 u8 origin
= event
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
702 case PERF_RECORD_MISC_USER
:
704 if (top
.hide_user_symbols
)
706 machine
= perf_session__find_host_machine(session
);
708 case PERF_RECORD_MISC_KERNEL
:
709 ++top
.kernel_samples
;
710 if (top
.hide_kernel_symbols
)
712 machine
= perf_session__find_host_machine(session
);
714 case PERF_RECORD_MISC_GUEST_KERNEL
:
715 ++top
.guest_kernel_samples
;
716 machine
= perf_session__find_machine(session
, event
->ip
.pid
);
718 case PERF_RECORD_MISC_GUEST_USER
:
719 ++top
.guest_us_samples
;
721 * TODO: we don't process guest user from host side
722 * except simple counting.
729 if (!machine
&& perf_guest
) {
730 pr_err("Can't find guest [%d]'s kernel information\n",
735 if (event
->header
.misc
& PERF_RECORD_MISC_EXACT_IP
)
738 if (perf_event__preprocess_sample(event
, session
, &al
, sample
,
739 symbol_filter
) < 0 ||
743 if (al
.sym
== NULL
) {
745 * As we do lazy loading of symtabs we only will know if the
746 * specified vmlinux file is invalid when we actually have a
747 * hit in kernel space and then try to load it. So if we get
748 * here and there are _no_ symbols in the DSO backing the
749 * kernel map, bail out.
751 * We may never get here, for instance, if we use -K/
752 * --hide-kernel-symbols, even if the user specifies an
753 * invalid --vmlinux ;-)
755 if (al
.map
== machine
->vmlinux_maps
[MAP__FUNCTION
] &&
756 RB_EMPTY_ROOT(&al
.map
->dso
->symbols
[MAP__FUNCTION
])) {
757 ui__warning("The %s file can't be used\n",
758 symbol_conf
.vmlinux_name
);
766 /* let's see, whether we need to install initial sym_filter_entry */
767 if (sym_filter_entry_sched
) {
768 top
.sym_filter_entry
= sym_filter_entry_sched
;
769 sym_filter_entry_sched
= NULL
;
770 if (parse_source(top
.sym_filter_entry
) < 0) {
771 struct symbol
*sym
= sym_entry__symbol(top
.sym_filter_entry
);
773 pr_err("Can't annotate %s", sym
->name
);
774 if (top
.sym_filter_entry
->map
->dso
->symtab_type
== SYMTAB__KALLSYMS
) {
775 pr_err(": No vmlinux file was found in the path:\n");
776 machine__fprintf_vmlinux_path(machine
, stderr
);
783 syme
= symbol__priv(al
.sym
);
784 if (!al
.sym
->ignore
) {
785 struct perf_evsel
*evsel
;
787 evsel
= perf_evlist__id2evsel(top
.evlist
, sample
->id
);
788 assert(evsel
!= NULL
);
789 syme
->count
[evsel
->idx
]++;
790 record_precise_ip(syme
, evsel
->idx
, ip
);
791 pthread_mutex_lock(&top
.active_symbols_lock
);
792 if (list_empty(&syme
->node
) || !syme
->node
.next
) {
793 static bool first
= true;
794 __list_insert_active_sym(syme
);
796 pthread_cond_broadcast(&top
.active_symbols_cond
);
800 pthread_mutex_unlock(&top
.active_symbols_lock
);
804 static void perf_session__mmap_read_cpu(struct perf_session
*self
, int cpu
)
806 struct perf_sample sample
;
807 union perf_event
*event
;
809 while ((event
= perf_evlist__read_on_cpu(top
.evlist
, cpu
)) != NULL
) {
810 perf_session__parse_sample(self
, event
, &sample
);
812 if (event
->header
.type
== PERF_RECORD_SAMPLE
)
813 perf_event__process_sample(event
, &sample
, self
);
815 perf_event__process(event
, &sample
, self
);
819 static void perf_session__mmap_read(struct perf_session
*self
)
823 for (i
= 0; i
< top
.evlist
->cpus
->nr
; i
++)
824 perf_session__mmap_read_cpu(self
, i
);
827 static void start_counters(struct perf_evlist
*evlist
)
829 struct perf_evsel
*counter
;
831 list_for_each_entry(counter
, &evlist
->entries
, node
) {
832 struct perf_event_attr
*attr
= &counter
->attr
;
834 attr
->sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_TID
;
837 attr
->sample_type
|= PERF_SAMPLE_PERIOD
;
839 attr
->sample_freq
= top
.freq
;
842 if (evlist
->nr_entries
> 1) {
843 attr
->sample_type
|= PERF_SAMPLE_ID
;
844 attr
->read_format
|= PERF_FORMAT_ID
;
849 if (perf_evsel__open(counter
, top
.evlist
->cpus
,
850 top
.evlist
->threads
, group
, inherit
) < 0) {
853 if (err
== EPERM
|| err
== EACCES
) {
854 ui__warning_paranoid();
858 * If it's cycles then fall back to hrtimer
859 * based cpu-clock-tick sw counter, which
860 * is always available even if no PMU support:
862 if (attr
->type
== PERF_TYPE_HARDWARE
&&
863 attr
->config
== PERF_COUNT_HW_CPU_CYCLES
) {
865 ui__warning("Cycles event not supported,\n"
866 "trying to fall back to cpu-clock-ticks\n");
868 attr
->type
= PERF_TYPE_SOFTWARE
;
869 attr
->config
= PERF_COUNT_SW_CPU_CLOCK
;
874 ui__warning("The %s event is not supported.\n",
875 event_name(counter
));
879 ui__warning("The sys_perf_event_open() syscall "
880 "returned with %d (%s). /bin/dmesg "
881 "may provide additional information.\n"
882 "No CONFIG_PERF_EVENTS=y kernel support "
883 "configured?\n", err
, strerror(err
));
888 if (perf_evlist__mmap(evlist
, mmap_pages
, false) < 0) {
889 ui__warning("Failed to mmap with %d (%s)\n",
890 errno
, strerror(errno
));
901 static int __cmd_top(void)
906 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
907 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
909 struct perf_session
*session
= perf_session__new(NULL
, O_WRONLY
, false, false, NULL
);
913 if (top
.target_tid
!= -1)
914 perf_event__synthesize_thread_map(top
.evlist
->threads
,
915 perf_event__process
, session
);
917 perf_event__synthesize_threads(perf_event__process
, session
);
919 start_counters(top
.evlist
);
920 session
->evlist
= top
.evlist
;
921 perf_session__update_sample_type(session
);
923 /* Wait for a minimal set of events before starting the snapshot */
924 poll(top
.evlist
->pollfd
, top
.evlist
->nr_fds
, 100);
926 perf_session__mmap_read(session
);
928 if (pthread_create(&thread
, NULL
, (use_browser
> 0 ? display_thread_tui
:
929 display_thread
), session
)) {
930 printf("Could not create display thread.\n");
935 struct sched_param param
;
937 param
.sched_priority
= realtime_prio
;
938 if (sched_setscheduler(0, SCHED_FIFO
, ¶m
)) {
939 printf("Could not set realtime priority.\n");
945 u64 hits
= top
.samples
;
947 perf_session__mmap_read(session
);
949 if (hits
== top
.samples
)
950 ret
= poll(top
.evlist
->pollfd
, top
.evlist
->nr_fds
, 100);
956 static const char * const top_usage
[] = {
957 "perf top [<options>]",
961 static const struct option options
[] = {
962 OPT_CALLBACK('e', "event", &top
.evlist
, "event",
963 "event selector. use 'perf list' to list available events",
965 OPT_INTEGER('c', "count", &default_interval
,
966 "event period to sample"),
967 OPT_INTEGER('p', "pid", &top
.target_pid
,
968 "profile events on existing process id"),
969 OPT_INTEGER('t', "tid", &top
.target_tid
,
970 "profile events on existing thread id"),
971 OPT_BOOLEAN('a', "all-cpus", &system_wide
,
972 "system-wide collection from all CPUs"),
973 OPT_STRING('C', "cpu", &top
.cpu_list
, "cpu",
974 "list of cpus to monitor"),
975 OPT_STRING('k', "vmlinux", &symbol_conf
.vmlinux_name
,
976 "file", "vmlinux pathname"),
977 OPT_BOOLEAN('K', "hide_kernel_symbols", &top
.hide_kernel_symbols
,
978 "hide kernel symbols"),
979 OPT_UINTEGER('m', "mmap-pages", &mmap_pages
, "number of mmap data pages"),
980 OPT_INTEGER('r', "realtime", &realtime_prio
,
981 "collect data with this RT SCHED_FIFO priority"),
982 OPT_INTEGER('d', "delay", &top
.delay_secs
,
983 "number of seconds to delay between refreshes"),
984 OPT_BOOLEAN('D', "dump-symtab", &dump_symtab
,
985 "dump the symbol table used for profiling"),
986 OPT_INTEGER('f', "count-filter", &top
.count_filter
,
987 "only display functions with more events than this"),
988 OPT_BOOLEAN('g', "group", &group
,
989 "put the counters into a counter group"),
990 OPT_BOOLEAN('i', "inherit", &inherit
,
991 "child tasks inherit counters"),
992 OPT_STRING('s', "sym-annotate", &sym_filter
, "symbol name",
993 "symbol to annotate"),
994 OPT_BOOLEAN('z', "zero", &top
.zero
,
995 "zero history across updates"),
996 OPT_INTEGER('F', "freq", &top
.freq
,
997 "profile at this frequency"),
998 OPT_INTEGER('E', "entries", &top
.print_entries
,
999 "display this many functions"),
1000 OPT_BOOLEAN('U', "hide_user_symbols", &top
.hide_user_symbols
,
1001 "hide user symbols"),
1002 OPT_BOOLEAN(0, "tui", &use_tui
, "Use the TUI interface"),
1003 OPT_BOOLEAN(0, "stdio", &use_stdio
, "Use the stdio interface"),
1004 OPT_INCR('v', "verbose", &verbose
,
1005 "be more verbose (show counter open errors, etc)"),
1009 int cmd_top(int argc
, const char **argv
, const char *prefix __used
)
1011 struct perf_evsel
*pos
;
1012 int status
= -ENOMEM
;
1014 top
.evlist
= perf_evlist__new(NULL
, NULL
);
1015 if (top
.evlist
== NULL
)
1018 page_size
= sysconf(_SC_PAGE_SIZE
);
1020 argc
= parse_options(argc
, argv
, options
, top_usage
, 0);
1022 usage_with_options(top_usage
, options
);
1025 * XXX For now start disabled, only using TUI if explicitely asked for.
1026 * Change that when handle_keys equivalent gets written, live annotation
1036 setup_browser(false);
1038 /* CPU and PID are mutually exclusive */
1039 if (top
.target_tid
> 0 && top
.cpu_list
) {
1040 printf("WARNING: PID switch overriding CPU\n");
1042 top
.cpu_list
= NULL
;
1045 if (top
.target_pid
!= -1)
1046 top
.target_tid
= top
.target_pid
;
1048 if (perf_evlist__create_maps(top
.evlist
, top
.target_pid
,
1049 top
.target_tid
, top
.cpu_list
) < 0)
1050 usage_with_options(top_usage
, options
);
1052 if (!top
.evlist
->nr_entries
&&
1053 perf_evlist__add_default(top
.evlist
) < 0) {
1054 pr_err("Not enough memory for event selector list\n");
1058 if (top
.delay_secs
< 1)
1062 * User specified count overrides default frequency.
1064 if (default_interval
)
1066 else if (top
.freq
) {
1067 default_interval
= top
.freq
;
1069 fprintf(stderr
, "frequency and count are zero, aborting\n");
1073 list_for_each_entry(pos
, &top
.evlist
->entries
, node
) {
1074 if (perf_evsel__alloc_fd(pos
, top
.evlist
->cpus
->nr
,
1075 top
.evlist
->threads
->nr
) < 0)
1078 * Fill in the ones not specifically initialized via -c:
1080 if (pos
->attr
.sample_period
)
1083 pos
->attr
.sample_period
= default_interval
;
1086 if (perf_evlist__alloc_pollfd(top
.evlist
) < 0 ||
1087 perf_evlist__alloc_mmap(top
.evlist
) < 0)
1090 top
.sym_evsel
= list_entry(top
.evlist
->entries
.next
, struct perf_evsel
, node
);
1092 symbol_conf
.priv_size
= (sizeof(struct sym_entry
) + sizeof(struct annotation
) +
1093 (top
.evlist
->nr_entries
+ 1) * sizeof(unsigned long));
1095 symbol_conf
.try_vmlinux_path
= (symbol_conf
.vmlinux_name
== NULL
);
1096 if (symbol__init() < 0)
1099 get_term_dimensions(&winsize
);
1100 if (top
.print_entries
== 0) {
1101 update_print_entries(&winsize
);
1102 signal(SIGWINCH
, sig_winch_handler
);
1105 status
= __cmd_top();
1107 perf_evlist__delete(top
.evlist
);