/*
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
23 #include "util/symbol.h"
24 #include "util/color.h"
25 #include "util/thread.h"
26 #include "util/util.h"
27 #include <linux/rbtree.h>
28 #include "util/parse-options.h"
29 #include "util/parse-events.h"
31 #include "util/debug.h"
45 #include <sys/syscall.h>
46 #include <sys/ioctl.h>
48 #include <sys/prctl.h>
53 #include <linux/unistd.h>
54 #include <linux/types.h>
static int		fd[MAX_NR_CPUS][MAX_COUNTERS];

static int		system_wide		=  0;

static int		default_interval	=  0;

static int		count_filter		=  5;
static int		print_entries;

static int		target_pid		= -1;
static int		inherit			=  0;
static int		profile_cpu		= -1;
static int		nr_cpus			=  0;
static unsigned int	realtime_prio		=  0;
static int		group			=  0;
static unsigned int	page_size;
static unsigned int	mmap_pages		= 16;
static int		freq			= 1000; /* 1 KHz */

static int		delay_secs		=  2;
static int		zero			=  0;
static int		dump_symtab		=  0;

static bool		hide_kernel_symbols	= false;
static bool		hide_user_symbols	= false;
static struct winsize	winsize;
struct symbol_conf	symbol_conf;
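/*
 * Source-level annotation state: each annotated symbol keeps a list of
 * source_line entries (one per line of objdump output) with per-counter
 * hit counts, protected by a per-symbol lock.
 */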
struct source_line {
	u64			eip;
	unsigned long		count[MAX_COUNTERS];
	char			*line;
	struct source_line	*next;
};

static char		*sym_filter		= NULL;
struct sym_entry	*sym_filter_entry	= NULL;
static int		sym_pcnt_filter		=  5;
static int		sym_counter		=  0;
static int		display_weighted	= -1;
struct sym_entry_source {
	struct source_line	*source;
	struct source_line	*lines;
	struct source_line	**lines_tail;
	pthread_mutex_t		lock;
};

struct sym_entry {
	struct rb_node		rb_node;
	struct list_head	node;
	unsigned long		snap_count;
	double			weight;
	int			skip;
	u16			name_len;
	u8			origin;
	struct map		*map;
	struct sym_entry_source	*src;
	unsigned long		count[0];
};
/*
 * Each struct symbol is preceded by symbol_conf.priv_size bytes of private
 * data holding the sym_entry; step over it to get back to the symbol.
 */
static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
{
	return ((void *)self) + symbol_conf.priv_size;
}
static void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}
static void update_print_entries(struct winsize *ws)
{
	print_entries = ws->ws_row;

	/* Leave room for the header lines printed above the symbol table. */
	if (print_entries > 9)
		print_entries -= 9;
}
static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}
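/*
 * Build the annotation for one symbol: run objdump over the symbol's address
 * range, keep every output line, and translate the address column back
 * through map->unmap_ip() so later samples can be matched to source lines.
 */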
static void parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct sym_entry_source *source;
	struct map *map;
	FILE *file;
	char command[PATH_MAX*2];
	const char *path;
	u64 len;

	if (!syme)
		return;

	if (syme->src == NULL) {
		syme->src = zalloc(sizeof(*source));
		if (syme->src == NULL)
			return;
		pthread_mutex_init(&syme->src->lock, NULL);
	}

	source = syme->src;

	if (source->lines) {
		pthread_mutex_lock(&source->lock);
		goto out_assign;
	}

	sym = sym_entry__symbol(syme);
	map = syme->map;
	path = map->dso->long_name;

	len = sym->end - sym->start;

	sprintf(command,
		"objdump --start-address=0x%016Lx "
			 "--stop-address=0x%016Lx -dS %s",
		map->unmap_ip(map, sym->start),
		map->unmap_ip(map, sym->end), path);

	file = popen(command, "r");
	if (!file)
		return;

	pthread_mutex_lock(&source->lock);
	source->lines_tail = &source->lines;
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c;

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
		memset(src, 0, sizeof(struct source_line));

		if (getline(&src->line, &dummy, file) < 0)
			break;
		if (!src->line)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		/* Append to the singly linked list of output lines. */
		src->next = NULL;
		*source->lines_tail = src;
		source->lines_tail = &src->next;

		/* Pick up the instruction address, whichever column it is in. */
		if (strlen(src->line) > 8 && src->line[8] == ':') {
			src->eip = strtoull(src->line, NULL, 16);
			src->eip = map->unmap_ip(map, src->eip);
		}
		if (strlen(src->line) > 16 && src->line[16] == ':') {
			src->eip = strtoull(src->line, NULL, 16);
			src->eip = map->unmap_ip(map, src->eip);
		}
	}
	pclose(file);
out_assign:
	sym_filter_entry = syme;
	pthread_mutex_unlock(&source->lock);
}
static void __zero_source_counters(struct sym_entry *syme)
{
	int i;
	struct source_line *line;

	line = syme->src->lines;
	while (line) {
		for (i = 0; i < nr_counters; i++)
			line->count[i] = 0;

		line = line->next;
	}
}
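/*
 * Credit one sample at 'ip' to the matching annotated source line.  Only the
 * symbol currently selected for annotation is tracked, and the lock is only
 * tried, never waited for, since this runs in the sample-processing path.
 */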
static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct source_line *line;

	if (syme != sym_filter_entry)
		return;

	if (pthread_mutex_trylock(&syme->src->lock))
		return;

	if (syme->src == NULL || syme->src->source == NULL)
		goto out_unlock;

	for (line = syme->src->lines; line; line = line->next) {
		if (line->eip == ip) {
			line->count[counter]++;
			break;
		}
	}
out_unlock:
	pthread_mutex_unlock(&syme->src->lock);
}
static void lookup_sym_source(struct sym_entry *syme)
{
	struct symbol *symbol = sym_entry__symbol(syme);
	struct source_line *line;
	char pattern[PATH_MAX];

	sprintf(pattern, "<%s>:", symbol->name);

	pthread_mutex_lock(&syme->src->lock);
	for (line = syme->src->lines; line; line = line->next) {
		if (strstr(line->line, pattern)) {
			syme->src->source = line;
			break;
		}
	}
	pthread_mutex_unlock(&syme->src->lock);
}
static void show_lines(struct source_line *queue, int count, int total)
{
	int i;
	struct source_line *line;

	line = queue;
	for (i = 0; i < count; i++) {
		float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;

		printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
		line = line->next;
	}
}
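/*
 * When annotating, up to TRACE_COUNT preceding source lines are kept queued
 * as context around each line that passes the percent filter.
 */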
#define TRACE_COUNT     3
static void show_details(struct sym_entry *syme)
{
	struct symbol *symbol;
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0, total = 0, more = 0;

	if (!syme)
		return;

	if (!syme->src->source)
		lookup_sym_source(syme);

	if (!syme->src->source)
		return;

	symbol = sym_entry__symbol(syme);
	printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	pthread_mutex_lock(&syme->src->lock);
	line = syme->src->source;
	while (line) {
		total += line->count[sym_counter];
		line = line->next;
	}

	line = syme->src->source;
	while (line) {
		float pcnt = 0.0;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count[sym_counter])
			pcnt = 100.0 * line->count[sym_counter] / (float)total;
		if (pcnt >= (float)sym_pcnt_filter) {
			if (displayed <= print_entries)
				show_lines(line_queue, line_queue_count, total);
			else
				more++;
			displayed += line_queue_count;
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			line_queue = line_queue->next;
			line_queue_count--;
		}

		/* Decay the per-line counts so the annotation tracks recent activity. */
		line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
		line = line->next;
	}
	pthread_mutex_unlock(&syme->src->lock);
	if (more)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}
/*
 * Symbols will be added here in event__process_sample and will be removed
 * after they have decayed.
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	if (!display_weighted)
		return weight;

	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}
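/*
 * Example: with three counters and snapshot counts c0, c1, c2 the weight is
 * c0 * c1 / (c2 + 1): every counter but the last multiplies the weight and
 * the last one divides it.
 */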
static long			samples;
static long			userspace_samples;
static const char		CONSOLE_CLEAR[] = "\e[H\e[2J";
/* Caller must hold active_symbols_lock. */
static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}
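/*
 * Heavier entries go to the left, so walking the tree with rb_first()/
 * rb_next() visits symbols in descending weight order for display.
 */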
static void print_sym_table(void)
{
	int printed = 0, j;
	int counter, snap = !display_weighted ? sym_counter : 0;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;
	int sym_width = 0, dso_width = 0, max_dso_width;
	const int win_width = winsize.ws_col - 1;

	samples = userspace_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {

			if ((hide_user_symbols &&
			     syme->origin == PERF_RECORD_MISC_USER) ||
			    (hide_kernel_symbols &&
			     syme->origin == PERF_RECORD_MISC_KERNEL)) {
				list_remove_active_sym(syme);
				continue;
			}
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			/* Decay the counts so the display tracks recent activity. */
			for (j = 0; j < nr_counters; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
	printf( "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% [",
		samples_per_sec,
		100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));

	if (nr_counters == 1 || !display_weighted) {
		printf("%Ld", (u64)attrs[0].sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	if (!display_weighted)
		printf("%s", event_name(sym_counter));
	else for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (target_pid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (sym_filter_entry) {
		show_details(sym_filter_entry);
		return;
	}

	/*
	 * Find the longest symbol name that will be displayed
	 */
	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		syme = rb_entry(nd, struct sym_entry, rb_node);
		if (++printed > print_entries ||
		    (int)syme->snap_count < count_filter)
			continue;

		if (syme->map->dso->long_name_len > dso_width)
			dso_width = syme->map->dso->long_name_len;

		if (syme->name_len > sym_width)
			sym_width = syme->name_len;
	}

	printed = 0;

	max_dso_width = winsize.ws_col - sym_width - 29;
	if (dso_width > max_dso_width)
		dso_width = max_dso_width;

	putchar('\n');
	if (nr_counters == 1)
		printf("             samples  pcnt");
	else
		printf("   weight    samples  pcnt");

	if (verbose)
		printf("         RIP       ");
	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
	printf("   %s    _______ _____",
	       nr_counters == 1 ? "      " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	printf(" %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);

		if (++printed > print_entries || (int)syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (nr_counters == 1 || !display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016llx", sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
					syme->map->dso->long_name :
					syme->map->dso->short_name);
	}
}
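/*
 * Interactive hotkey support: the prompt helpers below read a line from
 * stdin after the display thread has restored the terminal to its original
 * settings.
 */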
static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}
static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}
static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		pthread_mutex_lock(&syme->src->lock);
		__zero_source_counters(syme);
		*target = NULL;
		pthread_mutex_unlock(&syme->src->lock);
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		struct symbol *sym = sym_entry__symbol(syme);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		parse_source(found);

out_free:
	free(buf);
}
static void print_mapped_keys(void)
{
	char *name = NULL;

	if (sym_filter_entry) {
		struct symbol *sym = sym_entry__symbol(sym_filter_entry);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", print_entries);

	if (nr_counters > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(sym_counter));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", count_filter);

	if (symbol_conf.vmlinux_name) {
		fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
		fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name ?: "NULL");
		fprintf(stdout, "\t[S]     stop annotation.\n");
	}

	if (nr_counters > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);

	fprintf(stdout,
		"\t[K]     hide kernel symbols.               \t(%s)\n",
		hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.                 \t(%s)\n",
		hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}
static int key_mapped(int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
		case 'K':
		case 'U':
			return 1;
		case 'E':
		case 'w':
			return nr_counters > 1 ? 1 : 0;
		case 'F':
		case 's':
		case 'S':
			return symbol_conf.vmlinux_name ? 1 : 0;
		default:
			break;
	}

	return 0;
}
static void handle_keypress(int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		/* Read one raw keypress without echo. */
		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&delay_secs, "Enter display delay");
			if (delay_secs < 1)
				delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&print_entries, "Enter display entries (lines)");
			if (print_entries == 0) {
				sig_winch_handler(SIGWINCH);
				signal(SIGWINCH, sig_winch_handler);
			} else
				signal(SIGWINCH, SIG_DFL);
			break;
		case 'E':
			if (nr_counters > 1) {
				int i;

				fprintf(stderr, "\nAvailable events:");
				for (i = 0; i < nr_counters; i++)
					fprintf(stderr, "\n\t%d %s", i, event_name(i));

				prompt_integer(&sym_counter, "Enter details event counter");

				if (sym_counter >= nr_counters) {
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0));
					sym_counter = 0;
					sleep(1);
				}
			} else sym_counter = 0;
			break;
		case 'f':
			prompt_integer(&count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'K':
			hide_kernel_symbols = !hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (dump_symtab)
				dsos__fprintf(stderr);
			exit(0);
		case 's':
			prompt_symbol(&sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = sym_filter_entry;

				pthread_mutex_lock(&syme->src->lock);
				sym_filter_entry = NULL;
				__zero_source_counters(syme);
				pthread_mutex_unlock(&syme->src->lock);
			}
			break;
		case 'U':
			hide_user_symbols = !hide_user_symbols;
			break;
		case 'w':
			display_weighted = ~display_weighted;
			break;
		case 'z':
			zero = ~zero;
			break;
		default:
			break;
	}
}
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return */
	getc(stdin);

	do {
		print_sym_table();
	} while (poll(&stdin_poll, 1, delay_msecs) != 1);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(c);
	goto repeat;

	return NULL;
}
/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};
static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = symbol__priv(sym);
	syme->map = map;
	syme->src = NULL;

	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
		sym_filter_entry = syme;

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	if (!syme->skip)
		syme->name_len = strlen(sym->name);

	return 0;
}
static void event__process_sample(const event_t *self, int counter)
{
	u64 ip = self->ip.ip;
	struct sym_entry *syme;
	struct addr_location al;
	u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (origin) {
	case PERF_RECORD_MISC_USER:
		if (hide_user_symbols)
			return;
		break;
	case PERF_RECORD_MISC_KERNEL:
		if (hide_kernel_symbols)
			return;
		break;
	default:
		return;
	}

	if (event__preprocess_sample(self, &al, symbol_filter) < 0 ||
	    al.sym == NULL)
		return;

	syme = symbol__priv(al.sym);
	if (!syme->skip) {
		syme->count[counter]++;
		syme->origin = origin;
		record_precise_ip(syme, counter, ip);
		pthread_mutex_lock(&active_symbols_lock);
		if (list_empty(&syme->node) || !syme->node.next)
			__list_insert_active_sym(syme);
		pthread_mutex_unlock(&active_symbols_lock);
		if (origin == PERF_RECORD_MISC_USER)
			++userspace_samples;
		++samples;
	}
}
*event
)
970 switch (event
->header
.type
) {
971 case PERF_RECORD_COMM
:
972 event__process_comm(event
);
974 case PERF_RECORD_MMAP
:
975 event__process_mmap(event
);
struct mmap_data {
	int			counter;
	void			*base;
	int			mask;
	unsigned int		prev;
};

static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();	/* read data_head before the data it describes */

	return head;
}

static void mmap_read_counter(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	for (; old != head;) {
		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			/* Copy the record out in contiguous chunks across the wrap. */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		old += size;

		if (event->header.type == PERF_RECORD_SAMPLE)
			event__process_sample(event, md->counter);
		else
			event__process(event);
	}

	md->prev = old;
}

static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

static int nr_poll;
static int group_fd;

static void mmap_read(void)
{
	int i, counter;

	for (i = 0; i < nr_cpus; i++) {
		for (counter = 0; counter < nr_counters; counter++)
			mmap_read_counter(&mmap_array[i][counter]);
	}
}
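/*
 * Open one counter on one CPU (or on the target task), falling back from
 * hardware cycles to the software cpu-clock event when no PMU is available,
 * and map its sample buffer.
 */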
static void start_counter(int i, int counter)
{
	struct perf_event_attr *attr;
	int cpu;

	cpu = profile_cpu;
	if (target_pid == -1 && profile_cpu == -1)
		cpu = i;

	attr = attrs + counter;

	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (freq) {
		attr->sample_type	|= PERF_SAMPLE_PERIOD;
		attr->freq		= 1;
		attr->sample_freq	= freq;
	}

	attr->inherit		= (cpu < 0) && inherit;
	attr->mmap		= 1;

try_again:
	fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);

	if (fd[i][counter] < 0) {
		int err = errno;

		if (err == EPERM || err == EACCES)
			die("No permission - are you root?\n");
		/*
		 * If it's cycles then fall back to hrtimer
		 * based cpu-clock-tick sw counter, which
		 * is always available even if no PMU support:
		 */
		if (attr->type == PERF_TYPE_HARDWARE
			&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

			if (verbose)
				warning(" ... trying to fall back to cpu-clock-ticks\n");

			attr->type = PERF_TYPE_SOFTWARE;
			attr->config = PERF_COUNT_SW_CPU_CLOCK;
			goto try_again;
		}
		error("perfcounter syscall returned with %d (%s)\n",
			fd[i][counter], strerror(err));
		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
	}
	assert(fd[i][counter] >= 0);
	fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

	/*
	 * First counter acts as the group leader:
	 */
	if (group && group_fd == -1)
		group_fd = fd[i][counter];

	event_array[nr_poll].fd = fd[i][counter];
	event_array[nr_poll].events = POLLIN;
	nr_poll++;

	mmap_array[i][counter].counter = counter;
	mmap_array[i][counter].prev = 0;
	mmap_array[i][counter].mask = mmap_pages*page_size - 1;
	mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
			PROT_READ, MAP_SHARED, fd[i][counter], 0);
	if (mmap_array[i][counter].base == MAP_FAILED)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
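/*
 * Main loop: synthesize events for already-running threads, start every
 * counter, then alternate between draining the mmap buffers and sleeping
 * in poll(); the screen is updated from a separate display thread.
 */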
static int __cmd_top(void)
{
	pthread_t thread;
	int i, counter;
	int ret;

	if (target_pid != -1)
		event__synthesize_thread(target_pid, event__process);
	else
		event__synthesize_threads(event__process);

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
		for (counter = 0; counter < nr_counters; counter++)
			start_counter(i, counter);
	}

	/* Wait for a minimal set of events before starting the snapshot */
	poll(event_array, nr_poll, 100);

	mmap_read();

	if (pthread_create(&thread, NULL, display_thread, NULL)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = samples;

		mmap_read();

		if (hits == samples)
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}
static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_INTEGER('m', "mmap-pages", &mmap_pages,
		    "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
		    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		   "symbol to annotate - requires -k option"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	int counter;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	/* CPU and PID are mutually exclusive */
	if (target_pid != -1 && profile_cpu != -1) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		profile_cpu = -1;
	}

	if (!nr_counters)
		nr_counters = 1;

	symbol_conf.priv_size = (sizeof(struct sym_entry) +
				 (nr_counters + 1) * sizeof(unsigned long));
	if (symbol_conf.vmlinux_name == NULL)
		symbol_conf.try_vmlinux_path = true;
	if (symbol__init(&symbol_conf) < 0)
		return -1;

	if (delay_secs < 1)
		delay_secs = 1;

	parse_source(sym_filter_entry);

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	/*
	 * Fill in the ones not specifically initialized via -c:
	 */
	for (counter = 0; counter < nr_counters; counter++) {
		if (attrs[counter].sample_period)
			continue;

		attrs[counter].sample_period = default_interval;
	}

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert(nr_cpus >= 0);

	if (target_pid != -1 || profile_cpu != -1)
		nr_cpus = 1;

	get_term_dimensions(&winsize);
	if (print_entries == 0) {
		update_print_entries(&winsize);
		signal(SIGWINCH, sig_winch_handler);
	}

	return __cmd_top();
}