perf tools: Encode kernel module mappings in perf.data
[linux-2.6/btrfs-unstable.git] / tools/perf/builtin-kmem.c
blob 4af7199c5af74a352d8e34c2b42645674b0f64b6
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/rbtree.h>
struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static char const *input_name = "perf.data";

static int alloc_flag;
static int caller_flag;

static int alloc_lines = -1;
static int caller_lines = -1;

static bool raw_ip;

static char default_sort_order[] = "frag,hit,bytes";

static int *cpunode_map;
static int max_cpu_num;
struct alloc_stat {
        u64     call_site;
        u64     ptr;
        u64     bytes_req;
        u64     bytes_alloc;
        u32     hit;
        u32     pingpong;

        short   alloc_cpu;

        struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;
#define PATH_SYS_NODE "/sys/devices/system/node"
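/*
 * Size the cpu -> NUMA-node lookup table used for cross-node accounting.
 * The bound comes from /sys/devices/system/cpu/kernel_max (the highest
 * possible CPU index, so one extra slot is allocated); if that file cannot
 * be opened, only the default of 4096 is recorded.  Every slot starts as
 * -1, meaning "node unknown".
 */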
static void init_cpunode_map(void)
{
        FILE *fp;
        int i;

        fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
        if (!fp) {
                max_cpu_num = 4096;
                return;
        }

        if (fscanf(fp, "%d", &max_cpu_num) < 1)
                die("Failed to read 'kernel_max' from sysfs");
        max_cpu_num++;

        cpunode_map = calloc(max_cpu_num, sizeof(int));
        if (!cpunode_map)
                die("calloc");
        for (i = 0; i < max_cpu_num; i++)
                cpunode_map[i] = -1;
        fclose(fp);
}
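/*
 * Walk /sys/devices/system/node/node*/cpu* and record, for every cpuN
 * symlink found under a node directory, which NUMA node that CPU belongs to.
 */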
static void setup_cpunode_map(void)
{
        struct dirent *dent1, *dent2;
        DIR *dir1, *dir2;
        unsigned int cpu, mem;
        char buf[PATH_MAX];

        init_cpunode_map();

        dir1 = opendir(PATH_SYS_NODE);
        if (!dir1)
                return;

        while ((dent1 = readdir(dir1)) != NULL) {
                if (dent1->d_type != DT_DIR ||
                    sscanf(dent1->d_name, "node%u", &mem) < 1)
                        continue;

                snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
                dir2 = opendir(buf);
                if (!dir2)
                        continue;
                while ((dent2 = readdir(dir2)) != NULL) {
                        if (dent2->d_type != DT_LNK ||
                            sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
                                continue;
                        cpunode_map[cpu] = mem;
                }
        }
}
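/*
 * Account one allocation in the rbtree keyed by the returned pointer.  If
 * the pointer is already present its counters are bumped, otherwise a new
 * node is inserted; call_site and alloc_cpu are (re)recorded either way so
 * that a later free can be matched against the allocating CPU.
 */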
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
                              int bytes_req, int bytes_alloc, int cpu)
{
        struct rb_node **node = &root_alloc_stat.rb_node;
        struct rb_node *parent = NULL;
        struct alloc_stat *data = NULL;

        while (*node) {
                parent = *node;
                data = rb_entry(*node, struct alloc_stat, node);

                if (ptr > data->ptr)
                        node = &(*node)->rb_right;
                else if (ptr < data->ptr)
                        node = &(*node)->rb_left;
                else
                        break;
        }

        if (data && data->ptr == ptr) {
                data->hit++;
                data->bytes_req += bytes_req;
                data->bytes_alloc += bytes_alloc;
        } else {
                data = malloc(sizeof(*data));
                if (!data)
                        die("malloc");
                data->ptr = ptr;
                data->pingpong = 0;
                data->hit = 1;
                data->bytes_req = bytes_req;
                data->bytes_alloc = bytes_alloc;

                rb_link_node(&data->node, parent, node);
                rb_insert_color(&data->node, &root_alloc_stat);
        }
        data->call_site = call_site;
        data->alloc_cpu = cpu;
}
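/*
 * Same accounting as insert_alloc_stat(), but keyed by call site; this
 * feeds the per-callsite (--caller) view.
 */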
static void insert_caller_stat(unsigned long call_site,
                               int bytes_req, int bytes_alloc)
{
        struct rb_node **node = &root_caller_stat.rb_node;
        struct rb_node *parent = NULL;
        struct alloc_stat *data = NULL;

        while (*node) {
                parent = *node;
                data = rb_entry(*node, struct alloc_stat, node);

                if (call_site > data->call_site)
                        node = &(*node)->rb_right;
                else if (call_site < data->call_site)
                        node = &(*node)->rb_left;
                else
                        break;
        }

        if (data && data->call_site == call_site) {
                data->hit++;
                data->bytes_req += bytes_req;
                data->bytes_alloc += bytes_alloc;
        } else {
                data = malloc(sizeof(*data));
                if (!data)
                        die("malloc");
                data->call_site = call_site;
                data->pingpong = 0;
                data->hit = 1;
                data->bytes_req = bytes_req;
                data->bytes_alloc = bytes_alloc;

                rb_link_node(&data->node, parent, node);
                rb_insert_color(&data->node, &root_caller_stat);
        }
}
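/*
 * Handle one kmalloc/kmem_cache_alloc style tracepoint: pull the raw
 * fields, update both rbtrees and the global totals.  When 'node' is set
 * (the *_node variants), the event's target node is compared with the node
 * of the allocating CPU to count cross-node allocations.
 */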
static void process_alloc_event(void *data,
                                struct event *event,
                                int cpu,
                                u64 timestamp __used,
                                struct thread *thread __used,
                                int node)
{
        unsigned long call_site;
        unsigned long ptr;
        int bytes_req;
        int bytes_alloc;
        int node1, node2;

        ptr = raw_field_value(event, "ptr", data);
        call_site = raw_field_value(event, "call_site", data);
        bytes_req = raw_field_value(event, "bytes_req", data);
        bytes_alloc = raw_field_value(event, "bytes_alloc", data);

        insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
        insert_caller_stat(call_site, bytes_req, bytes_alloc);

        total_requested += bytes_req;
        total_allocated += bytes_alloc;

        if (node) {
                node1 = cpunode_map[cpu];
                node2 = raw_field_value(event, "node", data);
                if (node1 != node2)
                        nr_cross_allocs++;
        }
        nr_allocs++;
}
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);
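/*
 * Look up an alloc_stat entry in one of the rbtrees.  The comparison
 * function decides which key matters: ptr_cmp for the per-pointer tree,
 * callsite_cmp for the per-callsite tree; the unused key is passed as 0.
 */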
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
                                            unsigned long call_site,
                                            struct rb_root *root,
                                            sort_fn_t sort_fn)
{
        struct rb_node *node = root->rb_node;
        struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

        while (node) {
                struct alloc_stat *data;
                int cmp;

                data = rb_entry(node, struct alloc_stat, node);

                cmp = sort_fn(&key, data);
                if (cmp < 0)
                        node = node->rb_left;
                else if (cmp > 0)
                        node = node->rb_right;
                else
                        return data;
        }
        return NULL;
}
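/*
 * Handle kfree/kmem_cache_free: find the matching allocation and, if the
 * object is freed on a different CPU than it was allocated on, count a
 * "ping-pong" against both the pointer entry and its call site.
 */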
static void process_free_event(void *data,
                               struct event *event,
                               int cpu,
                               u64 timestamp __used,
                               struct thread *thread __used)
{
        unsigned long ptr;
        struct alloc_stat *s_alloc, *s_caller;

        ptr = raw_field_value(event, "ptr", data);

        s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
        if (!s_alloc)
                return;

        if (cpu != s_alloc->alloc_cpu) {
                s_alloc->pingpong++;

                s_caller = search_alloc_stat(0, s_alloc->call_site,
                                             &root_caller_stat, callsite_cmp);
                assert(s_caller);
                s_caller->pingpong++;
        }
        s_alloc->alloc_cpu = -1;
}
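/*
 * Dispatch a raw tracepoint record by event name to the allocation or free
 * handler.  The *_node allocation variants pass node == 1 so that the
 * cross-node accounting above is performed.
 */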
static void
process_raw_event(event_t *raw_event __used, void *data,
                  int cpu, u64 timestamp, struct thread *thread)
{
        struct event *event;
        int type;

        type = trace_parse_common_type(data);
        event = trace_find_event(type);

        if (!strcmp(event->name, "kmalloc") ||
            !strcmp(event->name, "kmem_cache_alloc")) {
                process_alloc_event(data, event, cpu, timestamp, thread, 0);
                return;
        }

        if (!strcmp(event->name, "kmalloc_node") ||
            !strcmp(event->name, "kmem_cache_alloc_node")) {
                process_alloc_event(data, event, cpu, timestamp, thread, 1);
                return;
        }

        if (!strcmp(event->name, "kfree") ||
            !strcmp(event->name, "kmem_cache_free")) {
                process_free_event(data, event, cpu, timestamp, thread);
                return;
        }
}
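/*
 * perf_session callback for sample events: parse the sample, resolve the
 * thread it came from, then hand the raw tracepoint payload to
 * process_raw_event().
 */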
static int process_sample_event(event_t *event, struct perf_session *session)
{
        struct sample_data data;
        struct thread *thread;

        memset(&data, 0, sizeof(data));
        data.time = -1;
        data.cpu = -1;
        data.period = 1;

        event__parse_sample(event, session->sample_type, &data);

        dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
                    event->header.misc,
                    data.pid, data.tid,
                    (void *)(long)data.ip,
                    (long long)data.period);

        thread = perf_session__findnew(session, event->ip.pid);
        if (thread == NULL) {
                pr_debug("problem processing %d event, skipping it.\n",
                         event->header.type);
                return -1;
        }

        dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

        process_raw_event(event, data.raw_data, data.cpu,
                          data.time, thread);

        return 0;
}
static struct perf_event_ops event_ops = {
        .sample = process_sample_event,
        .comm   = event__process_comm,
};
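/*
 * Internal fragmentation of a set of allocations, in percent:
 * 100 - 100 * bytes_requested / bytes_allocated.  For example, 100 bytes
 * requested but served from a 128-byte slab object gives
 * 100 - 100*100/128 = 21.875% wasted.
 */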
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
        if (n_alloc == 0)
                return 0.0;
        else
                return 100.0 - (100.0 * n_req / n_alloc);
}
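/*
 * Print one sorted table (callsite or alloc-pointer view), at most n_lines
 * rows; a line limit of -1 means "no limit".  Call-site addresses are
 * resolved to kernel symbols unless --raw-ip was given, and a trailing
 * "..." row marks output truncated by the limit.
 */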
static void __print_result(struct rb_root *root, struct perf_session *session,
                           int n_lines, int is_caller)
{
        struct rb_node *next;

        printf("%.102s\n", graph_dotted_line);
        printf(" %-34s |", is_caller ? "Callsite" : "Alloc Ptr");
        printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
        printf("%.102s\n", graph_dotted_line);

        next = rb_first(root);

        while (next && n_lines--) {
                struct alloc_stat *data = rb_entry(next, struct alloc_stat,
                                                   node);
                struct symbol *sym = NULL;
                char buf[BUFSIZ];
                u64 addr;

                if (is_caller) {
                        addr = data->call_site;
                        if (!raw_ip)
                                sym = map_groups__find_function(&session->kmaps, session, addr, NULL);
                } else
                        addr = data->ptr;

                if (sym != NULL)
                        snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
                                 addr - sym->start);
                else
                        snprintf(buf, sizeof(buf), "%#Lx", addr);
                printf(" %-34s |", buf);

                printf(" %9llu/%-5lu | %9llu/%-5lu | %6lu | %8lu | %6.3f%%\n",
                       (unsigned long long)data->bytes_alloc,
                       (unsigned long)data->bytes_alloc / data->hit,
                       (unsigned long long)data->bytes_req,
                       (unsigned long)data->bytes_req / data->hit,
                       (unsigned long)data->hit,
                       (unsigned long)data->pingpong,
                       fragmentation(data->bytes_req, data->bytes_alloc));

                next = rb_next(next);
        }

        if (n_lines == -1)
                printf(" ... | ... | ... | ... | ... | ... \n");

        printf("%.102s\n", graph_dotted_line);
}
static void print_summary(void)
{
        printf("\nSUMMARY\n=======\n");
        printf("Total bytes requested: %lu\n", total_requested);
        printf("Total bytes allocated: %lu\n", total_allocated);
        printf("Total bytes wasted on internal fragmentation: %lu\n",
               total_allocated - total_requested);
        printf("Internal fragmentation: %f%%\n",
               fragmentation(total_requested, total_allocated));
        printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}
static void print_result(struct perf_session *session)
{
        if (caller_flag)
                __print_result(&root_caller_sorted, session, caller_lines, 1);
        if (alloc_flag)
                __print_result(&root_alloc_sorted, session, alloc_lines, 0);
        print_summary();
}
struct sort_dimension {
        const char              name[20];
        sort_fn_t               cmp;
        struct list_head        list;
};
static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
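/*
 * Insert an alloc_stat into a result tree ordered by the user's sort keys:
 * the sort_dimension list is walked until one comparison differs, so later
 * keys only break ties left by earlier ones.
 */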
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
                        struct list_head *sort_list)
{
        struct rb_node **new = &(root->rb_node);
        struct rb_node *parent = NULL;
        struct sort_dimension *sort;

        while (*new) {
                struct alloc_stat *this;
                int cmp = 0;

                this = rb_entry(*new, struct alloc_stat, node);
                parent = *new;

                list_for_each_entry(sort, sort_list, list) {
                        cmp = sort->cmp(data, this);
                        if (cmp)
                                break;
                }

                if (cmp > 0)
                        new = &((*new)->rb_left);
                else
                        new = &((*new)->rb_right);
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color(&data->node, root);
}
static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
                          struct list_head *sort_list)
{
        struct rb_node *node;
        struct alloc_stat *data;

        for (;;) {
                node = rb_first(root);
                if (!node)
                        break;

                rb_erase(node, root);
                data = rb_entry(node, struct alloc_stat, node);
                sort_insert(root_sorted, data, sort_list);
        }
}
static void sort_result(void)
{
        __sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
        __sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
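/*
 * 'perf kmem stat' proper: open the perf.data file as a session, make sure
 * it holds trace data, build the kernel maps needed for symbol resolution,
 * process all events, then sort and print the results.
 */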
static int __cmd_kmem(void)
{
        int err = -EINVAL;
        struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
        if (session == NULL)
                return -ENOMEM;

        if (!perf_session__has_traces(session, "kmem record"))
                goto out_delete;

        if (perf_session__create_kernel_maps(session) < 0) {
                pr_err("Problems creating kernel maps\n");
                return -1;
        }

        setup_pager();
        err = perf_session__process_events(session, &event_ops);
        if (err != 0)
                goto out_delete;
        sort_result();
        print_result(session);
out_delete:
        perf_session__delete(session);
        return err;
}
static const char * const kmem_usage[] = {
        "perf kmem [<options>] {record|stat}",
        NULL
};
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        if (l->ptr < r->ptr)
                return -1;
        else if (l->ptr > r->ptr)
                return 1;
        return 0;
}

static struct sort_dimension ptr_sort_dimension = {
        .name   = "ptr",
        .cmp    = ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        if (l->call_site < r->call_site)
                return -1;
        else if (l->call_site > r->call_site)
                return 1;
        return 0;
}

static struct sort_dimension callsite_sort_dimension = {
        .name   = "callsite",
        .cmp    = callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        if (l->hit < r->hit)
                return -1;
        else if (l->hit > r->hit)
                return 1;
        return 0;
}

static struct sort_dimension hit_sort_dimension = {
        .name   = "hit",
        .cmp    = hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        if (l->bytes_alloc < r->bytes_alloc)
                return -1;
        else if (l->bytes_alloc > r->bytes_alloc)
                return 1;
        return 0;
}

static struct sort_dimension bytes_sort_dimension = {
        .name   = "bytes",
        .cmp    = bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        double x, y;

        x = fragmentation(l->bytes_req, l->bytes_alloc);
        y = fragmentation(r->bytes_req, r->bytes_alloc);

        if (x < y)
                return -1;
        else if (x > y)
                return 1;
        return 0;
}

static struct sort_dimension frag_sort_dimension = {
        .name   = "frag",
        .cmp    = frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        if (l->pingpong < r->pingpong)
                return -1;
        else if (l->pingpong > r->pingpong)
                return 1;
        return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
        .name   = "pingpong",
        .cmp    = pingpong_cmp,
};
static struct sort_dimension *avail_sorts[] = {
        &ptr_sort_dimension,
        &callsite_sort_dimension,
        &hit_sort_dimension,
        &bytes_sort_dimension,
        &frag_sort_dimension,
        &pingpong_sort_dimension,
};
#define NUM_AVAIL_SORTS \
        (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
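/*
 * Turn a --sort argument such as "frag,hit,bytes" into a list of
 * sort_dimension entries: each comma-separated token must name one of the
 * dimensions in avail_sorts[], and a private copy of the matching entry is
 * appended to the caller or allocator sort list.
 */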
static int sort_dimension__add(const char *tok, struct list_head *list)
{
        struct sort_dimension *sort;
        int i;

        for (i = 0; i < NUM_AVAIL_SORTS; i++) {
                if (!strcmp(avail_sorts[i]->name, tok)) {
                        sort = malloc(sizeof(*sort));
                        if (!sort)
                                die("malloc");
                        memcpy(sort, avail_sorts[i], sizeof(*sort));
                        list_add_tail(&sort->list, list);
                        return 0;
                }
        }

        return -1;
}
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
        char *tok;
        char *str = strdup(arg);

        if (!str)
                die("strdup");

        while (true) {
                tok = strsep(&str, ",");
                if (!tok)
                        break;
                if (sort_dimension__add(tok, sort_list) < 0) {
                        error("Unknown --sort key: '%s'", tok);
                        return -1;
                }
        }

        free(str);
        return 0;
}
static int parse_sort_opt(const struct option *opt __used,
                          const char *arg, int unset __used)
{
        if (!arg)
                return -1;

        if (caller_flag > alloc_flag)
                return setup_sorting(&caller_sort, arg);
        else
                return setup_sorting(&alloc_sort, arg);

        return 0;
}
static int parse_caller_opt(const struct option *opt __used,
                            const char *arg __used, int unset __used)
{
        caller_flag = (alloc_flag + 1);
        return 0;
}

static int parse_alloc_opt(const struct option *opt __used,
                           const char *arg __used, int unset __used)
{
        alloc_flag = (caller_flag + 1);
        return 0;
}
static int parse_line_opt(const struct option *opt __used,
                          const char *arg, int unset __used)
{
        int lines;

        if (!arg)
                return -1;

        lines = strtoul(arg, NULL, 10);

        if (caller_flag > alloc_flag)
                caller_lines = lines;
        else
                alloc_lines = lines;

        return 0;
}
static const struct option kmem_options[] = {
        OPT_STRING('i', "input", &input_name, "file",
                   "input file name"),
        OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
                           "show per-callsite statistics",
                           parse_caller_opt),
        OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
                           "show per-allocation statistics",
                           parse_alloc_opt),
        OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
                     "sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
                     parse_sort_opt),
        OPT_CALLBACK('l', "line", NULL, "num",
                     "show n lines",
                     parse_line_opt),
        OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
        OPT_END()
};
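/*
 * Canned 'perf record' command line used by 'perf kmem record': system-wide
 * (-a), raw sample records (-R), one sample per event (-c 1), for the six
 * kmem tracepoints consumed by the stat pass above.
 */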
static const char *record_args[] = {
        "record",
        "-a",
        "-R",
        "-M",
        "-f",
        "-c", "1",
        "-e", "kmem:kmalloc",
        "-e", "kmem:kmalloc_node",
        "-e", "kmem:kfree",
        "-e", "kmem:kmem_cache_alloc",
        "-e", "kmem:kmem_cache_alloc_node",
        "-e", "kmem:kmem_cache_free",
};
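/*
 * 'perf kmem record': prepend the canned record_args to whatever extra
 * arguments the user supplied and hand the combined argv to cmd_record().
 */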
static int __cmd_record(int argc, const char **argv)
{
        unsigned int rec_argc, i, j;
        const char **rec_argv;

        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));

        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);

        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];

        return cmd_record(i, rec_argv, NULL);
}
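/*
 * Entry point for 'perf kmem': "record" runs perf record with the kmem
 * tracepoints, while "stat" builds the CPU->node map, applies the default
 * sort order where the user gave none, and analyzes the recorded data.
 */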
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
        argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

        if (!argc)
                usage_with_options(kmem_usage, kmem_options);

        symbol__init();

        if (!strncmp(argv[0], "rec", 3)) {
                return __cmd_record(argc, argv);
        } else if (!strcmp(argv[0], "stat")) {
                setup_cpunode_map();

                if (list_empty(&caller_sort))
                        setup_sorting(&caller_sort, default_sort_order);
                if (list_empty(&alloc_sort))
                        setup_sorting(&alloc_sort, default_sort_order);

                return __cmd_kmem();
        }

        return 0;
}