perf_counter tools: Print out symbol parsing errors only if --verbose
Documentation/perf_counter/builtin-top.c
/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
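/*
 * Illustrative invocations (not from the original source; the option names
 * are taken from the options[] table further down in this file):
 *
 *   perf top                    # sample all CPUs with the default counter
 *   perf top -d 5 -E 25         # refresh every 5 seconds, show 25 entries
 *   perf top -p <pid>           # profile one existing task
 *   perf top -C 2 -c 50000      # profile CPU 2 with a 50000-event period
 */
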
#include "builtin.h"

#include "perf.h"

#include "util/symbol.h"
#include "util/util.h"
#include "util/rbtree.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

static int system_wide = 0;

static __u64 default_event_id[MAX_COUNTERS] = {
        EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
        EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),
        EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),
        EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),

        EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),
        EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),
        EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),
        EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),
};

static int default_interval = 100000;

static int event_count[MAX_COUNTERS];
static int fd[MAX_NR_CPUS][MAX_COUNTERS];

static __u64 count_filter = 5;
static int print_entries = 15;

static int target_pid = -1;
static int profile_cpu = -1;
static int nr_cpus = 0;
static unsigned int realtime_prio = 0;
static int group = 0;
static unsigned int page_size;
static unsigned int mmap_pages = 16;
static int use_mmap = 0;
static int use_munmap = 0;
static int freq = 0;

static char *sym_filter;
static unsigned long filter_start;
static unsigned long filter_end;

static int delay_secs = 2;
static int zero;
static int dump_symtab;

static const unsigned int default_count[] = {
        1000000,
        1000000,
          10000,
          10000,
        1000000,
          10000,
};

/*
 * Symbols
 */

static uint64_t min_ip;
static uint64_t max_ip = -1ll;

struct sym_entry {
        struct rb_node          rb_node;
        struct list_head        node;
        unsigned long           count[MAX_COUNTERS];
        unsigned long           snap_count;
        double                  weight;
        int                     skip;
};

struct sym_entry *sym_filter_entry;

struct dso *kernel_dso;

/*
 * Symbols are added here in record_ip() and removed again once their
 * counts have decayed to zero.
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
        double weight = sym->snap_count;
        int counter;

        for (counter = 1; counter < nr_counters-1; counter++)
                weight *= sym->count[counter];

        weight /= (sym->count[counter] + 1);

        return weight;
}

static long events;
static long userspace_events;
static const char CONSOLE_CLEAR[] = "\e[H\e[2J";

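/*
 * __list_insert_active_sym() expects active_symbols_lock to be held by the
 * caller (record_ip() takes it); list_remove_active_sym() locks internally.
 */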
static void __list_insert_active_sym(struct sym_entry *syme)
{
        list_add(&syme->node, &active_symbols);
}

static void list_remove_active_sym(struct sym_entry *syme)
{
        pthread_mutex_lock(&active_symbols_lock);
        list_del_init(&syme->node);
        pthread_mutex_unlock(&active_symbols_lock);
}

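/*
 * Insert into the temporary rb-tree ordered by descending weight, so an
 * rb_first()/rb_next() walk visits the hottest symbols first.
 */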
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
        struct rb_node **p = &tree->rb_node;
        struct rb_node *parent = NULL;
        struct sym_entry *iter;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct sym_entry, rb_node);

                if (se->weight > iter->weight)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&se->rb_node, parent, p);
        rb_insert_color(&se->rb_node, tree);
}

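/*
 * Snapshot and decay the per-symbol counts, then print the top entries
 * sorted by weight. Runs from the display thread every delay_secs seconds.
 */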
static void print_sym_table(void)
{
        int printed = 0, j;
        int counter;
        float events_per_sec = events/delay_secs;
        float kevents_per_sec = (events-userspace_events)/delay_secs;
        float sum_kevents = 0.0;
        struct sym_entry *syme, *n;
        struct rb_root tmp = RB_ROOT;
        struct rb_node *nd;

        events = userspace_events = 0;

        /* Sort the active symbols */
        pthread_mutex_lock(&active_symbols_lock);
        syme = list_entry(active_symbols.next, struct sym_entry, node);
        pthread_mutex_unlock(&active_symbols_lock);

        list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
                syme->snap_count = syme->count[0];
                if (syme->snap_count != 0) {
                        syme->weight = sym_weight(syme);
                        rb_insert_active_sym(&tmp, syme);
                        sum_kevents += syme->snap_count;

                        for (j = 0; j < nr_counters; j++)
                                syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
                } else
                        list_remove_active_sym(syme);
        }

        write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR));

        printf(
"------------------------------------------------------------------------------\n");
        printf( "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% [",
                events_per_sec,
                100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec)));

        if (nr_counters == 1)
                printf("%d ", event_count[0]);

        for (counter = 0; counter < nr_counters; counter++) {
                if (counter)
                        printf("/");

                printf("%s", event_name(counter));
        }

        printf( "], ");

        if (target_pid != -1)
                printf(" (target_pid: %d", target_pid);
        else
                printf(" (all");

        if (profile_cpu != -1)
                printf(", cpu: %d)\n", profile_cpu);
        else {
                if (target_pid != -1)
                        printf(")\n");
                else
                        printf(", %d CPUs)\n", nr_cpus);
        }

        printf("------------------------------------------------------------------------------\n\n");

        if (nr_counters == 1)
                printf("             events    pcnt");
        else
                printf("  weight     events    pcnt");

        printf("         RIP          kernel function\n"
               "  ______     ______   _____   ________________   _______________\n\n");

        for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
                struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
                struct symbol *sym = (struct symbol *)(syme + 1);
                float pcnt;

                if (++printed > print_entries || syme->snap_count < count_filter)
                        continue;

                pcnt = 100.0 - (100.0 * ((sum_kevents - syme->snap_count) /
                                         sum_kevents));

                if (nr_counters == 1)
                        printf("%19.2f - %4.1f%% - %016llx : %s\n",
                               syme->weight, pcnt, sym->start, sym->name);
                else
                        printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n",
                               syme->weight, syme->snap_count,
                               pcnt, sym->start, sym->name);
        }

        {
                struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };

                if (poll(&stdin_poll, 1, 0) == 1) {
                        printf("key pressed - exiting.\n");
                        exit(0);
                }
        }
}

static void *display_thread(void *arg)
{
        printf("PerfTop refresh period: %d seconds\n", delay_secs);

        while (!sleep(delay_secs))
                print_sym_table();

        return NULL;
}

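/*
 * Called for each symbol while loading the kernel DSO: returns 1 to drop
 * uninteresting section-boundary symbols, tags the idle routines so their
 * hits are skipped, and resolves the --sym-filter symbol to a start/end range.
 */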
static int symbol_filter(struct dso *self, struct symbol *sym)
{
        static int filter_match;
        struct sym_entry *syme;
        const char *name = sym->name;

        if (!strcmp(name, "_text") ||
            !strcmp(name, "_etext") ||
            !strcmp(name, "_sinittext") ||
            !strncmp("init_module", name, 11) ||
            !strncmp("cleanup_module", name, 14) ||
            strstr(name, "_text_start") ||
            strstr(name, "_text_end"))
                return 1;

        syme = dso__sym_priv(self, sym);
        /* Tag events to be skipped. */
        if (!strcmp("default_idle", name) ||
            !strcmp("cpu_idle", name) ||
            !strcmp("enter_idle", name) ||
            !strcmp("exit_idle", name) ||
            !strcmp("mwait_idle", name))
                syme->skip = 1;

        if (filter_match == 1) {
                filter_end = sym->start;
                filter_match = -1;
                if (filter_end - filter_start > 10000) {
                        fprintf(stderr,
                                "hm, too large filter symbol <%s> - skipping.\n",
                                sym_filter);
                        fprintf(stderr, "symbol filter start: %016lx\n",
                                filter_start);
                        fprintf(stderr, "                end: %016lx\n",
                                filter_end);
                        filter_end = filter_start = 0;
                        sym_filter = NULL;
                        sleep(1);
                }
        }

        if (filter_match == 0 && sym_filter && !strcmp(name, sym_filter)) {
                filter_match = 1;
                filter_start = sym->start;
        }

        return 0;
}

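/*
 * Load the kernel symbol table and remember the lowest and highest symbol
 * addresses, so samples outside [min_ip, max_ip] can be counted as userspace.
 */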
static int parse_symbols(void)
{
        struct rb_node *node;
        struct symbol *sym;

        kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
        if (kernel_dso == NULL)
                return -1;

        if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) != 0)
                goto out_delete_dso;

        node = rb_first(&kernel_dso->syms);
        sym = rb_entry(node, struct symbol, rb_node);
        min_ip = sym->start;

        node = rb_last(&kernel_dso->syms);
        sym = rb_entry(node, struct symbol, rb_node);
        max_ip = sym->end;

        if (dump_symtab)
                dso__fprintf(kernel_dso, stderr);

        return 0;

out_delete_dso:
        dso__delete(kernel_dso);
        kernel_dso = NULL;
        return -1;
}

#define TRACE_COUNT     3

/*
 * Look up the sampled IP in the kernel symbol tree and record the hit:
 */
static void record_ip(uint64_t ip, int counter)
{
        struct symbol *sym = dso__find_symbol(kernel_dso, ip);

        if (sym != NULL) {
                struct sym_entry *syme = dso__sym_priv(kernel_dso, sym);

                if (!syme->skip) {
                        syme->count[counter]++;
                        pthread_mutex_lock(&active_symbols_lock);
                        if (list_empty(&syme->node) || !syme->node.next)
                                __list_insert_active_sym(syme);
                        pthread_mutex_unlock(&active_symbols_lock);
                        return;
                }
        }

        events--;
}

static void process_event(uint64_t ip, int counter)
{
        events++;

        if (ip < min_ip || ip > max_ip) {
                userspace_events++;
                return;
        }

        record_ip(ip, counter);
}

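/*
 * Per-counter view of its mmap()ed sampling buffer: 'prev' tracks how far
 * userspace has read relative to the kernel-maintained data_head.
 */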
struct mmap_data {
        int counter;
        void *base;
        unsigned int mask;
        unsigned int prev;
};

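/* Read the kernel-updated data_head; rmb() orders it before the data reads. */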
static unsigned int mmap_read_head(struct mmap_data *md)
{
        struct perf_counter_mmap_page *pc = md->base;
        int head;

        head = pc->data_head;
        rmb();

        return head;
}

struct timeval last_read, this_read;

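/*
 * Drain all newly published records from one counter's ring buffer: sample
 * IPs feed the per-symbol counts, mmap/munmap records are printed directly.
 */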
static void mmap_read(struct mmap_data *md)
{
        unsigned int head = mmap_read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        int diff;

        gettimeofday(&this_read, NULL);

        /*
         * If we're further behind than half the buffer, there's a chance
         * the writer will bite our tail and screw up the events under us.
         *
         * If we somehow ended up ahead of the head, we got messed up.
         *
         * In either case, truncate and restart at head.
         */
        diff = head - old;
        if (diff > md->mask / 2 || diff < 0) {
                struct timeval iv;
                unsigned long msecs;

                timersub(&this_read, &last_read, &iv);
                msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

                fprintf(stderr, "WARNING: failed to keep up with mmap data."
                                "  Last read %lu msecs ago.\n", msecs);

                /*
                 * head points to a known good entry, start there.
                 */
                old = head;
        }

        last_read = this_read;

        for (; old != head;) {
                struct ip_event {
                        struct perf_event_header header;
                        __u64 ip;
                        __u32 pid, target_pid;
                };
                struct mmap_event {
                        struct perf_event_header header;
                        __u32 pid, target_pid;
                        __u64 start;
                        __u64 len;
                        __u64 pgoff;
                        char filename[PATH_MAX];
                };

                typedef union event_union {
                        struct perf_event_header header;
                        struct ip_event ip;
                        struct mmap_event mmap;
                } event_t;

                event_t *event = (event_t *)&data[old & md->mask];

                event_t event_copy;

                size_t size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = &event_copy;
                }

                old += size;

                if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
                        if (event->header.type & PERF_SAMPLE_IP)
                                process_event(event->ip.ip, md->counter);
                } else {
                        switch (event->header.type) {
                        case PERF_EVENT_MMAP:
                        case PERF_EVENT_MUNMAP:
                                printf("%s: %Lu %Lu %Lu %s\n",
                                        event->header.type == PERF_EVENT_MMAP
                                        ? "mmap" : "munmap",
                                        event->mmap.start,
                                        event->mmap.len,
                                        event->mmap.pgoff,
                                        event->mmap.filename);
                                break;
                        }
                }
        }

        md->prev = old;
}

static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

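/*
 * Open one counter fd per (cpu, counter) pair, mmap each for sampling, start
 * the display thread, then loop reading the buffers (polling when idle).
 */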
static int __cmd_top(void)
{
        struct perf_counter_attr attr;
        pthread_t thread;
        int i, counter, group_fd, nr_poll = 0;
        unsigned int cpu;
        int ret;

        for (i = 0; i < nr_cpus; i++) {
                group_fd = -1;
                for (counter = 0; counter < nr_counters; counter++) {

                        cpu = profile_cpu;
                        if (target_pid == -1 && profile_cpu == -1)
                                cpu = i;

                        memset(&attr, 0, sizeof(attr));
                        attr.config = event_id[counter];
                        attr.sample_period = event_count[counter];
                        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
                        attr.mmap = use_mmap;
                        attr.munmap = use_munmap;
                        attr.freq = freq;

                        fd[i][counter] = sys_perf_counter_open(&attr, target_pid, cpu, group_fd, 0);
                        if (fd[i][counter] < 0) {
                                int err = errno;

                                error("syscall returned with %d (%s)\n",
                                      fd[i][counter], strerror(err));
                                if (err == EPERM)
                                        printf("Are you root?\n");
                                exit(-1);
                        }
                        assert(fd[i][counter] >= 0);
                        fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

                        /*
                         * First counter acts as the group leader:
                         */
                        if (group && group_fd == -1)
                                group_fd = fd[i][counter];

                        event_array[nr_poll].fd = fd[i][counter];
                        event_array[nr_poll].events = POLLIN;
                        nr_poll++;

                        mmap_array[i][counter].counter = counter;
                        mmap_array[i][counter].prev = 0;
                        mmap_array[i][counter].mask = mmap_pages*page_size - 1;
                        mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
                                        PROT_READ, MAP_SHARED, fd[i][counter], 0);
                        if (mmap_array[i][counter].base == MAP_FAILED)
                                die("failed to mmap with %d (%s)\n", errno, strerror(errno));
                }
        }

        if (pthread_create(&thread, NULL, display_thread, NULL)) {
                printf("Could not create display thread.\n");
                exit(-1);
        }

        if (realtime_prio) {
                struct sched_param param;

                param.sched_priority = realtime_prio;
                if (sched_setscheduler(0, SCHED_FIFO, &param)) {
                        printf("Could not set realtime priority.\n");
                        exit(-1);
                }
        }

        while (1) {
                int hits = events;

                for (i = 0; i < nr_cpus; i++) {
                        for (counter = 0; counter < nr_counters; counter++)
                                mmap_read(&mmap_array[i][counter]);
                }

                if (hits == events)
                        ret = poll(event_array, nr_poll, 100);
        }

        return 0;
}

static const char * const top_usage[] = {
        "perf top [<options>]",
        NULL
};

static char events_help_msg[EVENTS_HELP_MAX];

static const struct option options[] = {
        OPT_CALLBACK('e', "event", NULL, "event",
                     events_help_msg, parse_events),
        OPT_INTEGER('c', "count", &default_interval,
                    "event period to sample"),
        OPT_INTEGER('p', "pid", &target_pid,
                    "profile events on existing pid"),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_INTEGER('C', "CPU", &profile_cpu,
                    "CPU to profile on"),
        OPT_INTEGER('m', "mmap-pages", &mmap_pages,
                    "number of mmap data pages"),
        OPT_INTEGER('r', "realtime", &realtime_prio,
                    "collect data with this RT SCHED_FIFO priority"),
        OPT_INTEGER('d', "delay", &delay_secs,
                    "number of seconds to delay between refreshes"),
        OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
                    "dump the symbol table used for profiling"),
        OPT_INTEGER('f', "count-filter", &count_filter,
                    "only display functions with more events than this"),
        OPT_BOOLEAN('g', "group", &group,
                    "put the counters into a counter group"),
        OPT_STRING('s', "sym-filter", &sym_filter, "pattern",
                   "only display symbols matching this pattern"),
        OPT_BOOLEAN('z', "zero", &zero,
                    "zero history across updates"),
        OPT_BOOLEAN('M', "use-mmap", &use_mmap,
                    "track mmap events"),
        OPT_BOOLEAN('U', "use-munmap", &use_munmap,
                    "track munmap events"),
        OPT_INTEGER('F', "freq", &freq,
                    "profile at this frequency"),
        OPT_INTEGER('E', "entries", &print_entries,
                    "display this many functions"),
        OPT_END()
};

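/*
 * Entry point for 'perf top': parse options, apply defaults, load the kernel
 * symbols and hand off to __cmd_top().
 */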
int cmd_top(int argc, const char **argv, const char *prefix)
{
        int counter;

        page_size = sysconf(_SC_PAGE_SIZE);

        create_events_help(events_help_msg);
        memcpy(event_id, default_event_id, sizeof(default_event_id));

        argc = parse_options(argc, argv, options, top_usage, 0);
        if (argc)
                usage_with_options(top_usage, options);

        if (freq) {
                default_interval = freq;
                freq = 1;
        }

        /* CPU and PID are mutually exclusive */
        if (target_pid != -1 && profile_cpu != -1) {
                printf("WARNING: PID switch overriding CPU\n");
                sleep(1);
                profile_cpu = -1;
        }

        if (!nr_counters) {
                nr_counters = 1;
                event_id[0] = 0;
        }

        for (counter = 0; counter < nr_counters; counter++) {
                if (event_count[counter])
                        continue;

                event_count[counter] = default_interval;
        }

        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        assert(nr_cpus <= MAX_NR_CPUS);
        assert(nr_cpus >= 0);

        if (target_pid != -1 || profile_cpu != -1)
                nr_cpus = 1;

        parse_symbols();

        return __cmd_top();
}