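/*
 * tools/perf/builtin-kmem.c
 *
 * 'perf kmem' - record and analyze the kernel slab allocation
 * tracepoints (kmem:kmalloc, kmem:kmalloc_node, kmem:kfree,
 * kmem:kmem_cache_alloc, kmem:kmem_cache_alloc_node,
 * kmem:kmem_cache_free) and report per-callsite and per-pointer
 * statistics: bytes requested vs. allocated, hit counts, cross-CPU
 * "ping-pong" frees and internal fragmentation.
 */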
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/rbtree.h>
struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static const char	*input_name;

static int		alloc_flag;
static int		caller_flag;

static int		alloc_lines = -1;
static int		caller_lines = -1;

static bool		raw_ip;

static char		default_sort_order[] = "frag,hit,bytes";

static int		*cpunode_map;
static int		max_cpu_num;

struct alloc_stat {
	u64		call_site;
	u64		ptr;
	u64		bytes_req;
	u64		bytes_alloc;
	u32		hit;
	u32		pingpong;

	short		alloc_cpu;

	struct rb_node	node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

#define PATH_SYS_NODE	"/sys/devices/system/node"
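/*
 * cpunode_map[] translates a CPU number to its NUMA node.  It is sized
 * from /sys/devices/system/cpu/kernel_max (falling back to 4096 entries)
 * and filled in by scanning the cpuN links under each
 * /sys/devices/system/node/nodeN directory.
 */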
static void init_cpunode_map(void)
{
	FILE *fp;
	int i;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp) {
		max_cpu_num = 4096;
		return;
	}

	if (fscanf(fp, "%d", &max_cpu_num) < 1)
		die("Failed to read 'kernel_max' from sysfs");
	max_cpu_num++;

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map)
		die("calloc");
	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;
	fclose(fp);
}
static void setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	init_cpunode_map();

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return;

	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR ||
		    sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK ||
			    sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
}
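/*
 * Account one allocation in the rbtree keyed by the returned pointer.
 * An existing node for the same pointer is updated in place; otherwise
 * a new alloc_stat is inserted.  The call site and allocating CPU are
 * (re)recorded on every hit.
 */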
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			      int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
}
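/* Same accounting as above, but keyed by the allocation call site. */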
static void insert_caller_stat(unsigned long call_site,
			       int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}
}
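/*
 * Handle a kmalloc/kmem_cache_alloc sample.  'node' is non-zero for the
 * _node tracepoint variants; in that case the node requested by the
 * caller is compared against the NUMA node of the allocating CPU (via
 * cpunode_map) to count cross-node allocations.
 */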
static void process_alloc_event(void *data,
				struct event *event,
				int cpu,
				u64 timestamp __used,
				struct thread *thread __used,
				int node)
{
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req;
	int bytes_alloc;
	int node1, node2;

	ptr = raw_field_value(event, "ptr", data);
	call_site = raw_field_value(event, "call_site", data);
	bytes_req = raw_field_value(event, "bytes_req", data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", data);

	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
	insert_caller_stat(call_site, bytes_req, bytes_alloc);

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	if (node) {
		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
	nr_allocs++;
}
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}
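/*
 * Handle a kfree/kmem_cache_free sample.  A free that happens on a CPU
 * other than the allocating one is counted as a "ping-pong" against both
 * the allocation and its call site; the entry's alloc_cpu is reset to -1
 * once the object has been freed.
 */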
static void process_free_event(void *data,
			       struct event *event,
			       int cpu,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
	unsigned long ptr;
	struct alloc_stat *s_alloc, *s_caller;

	ptr = raw_field_value(event, "ptr", data);

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return;

	if (cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		assert(s_caller);
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;
}
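/* Dispatch a raw tracepoint sample to the alloc or free handler by event name. */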
static void process_raw_event(union perf_event *raw_event __used, void *data,
			      int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 0);
		return;
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 1);
		return;
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		process_free_event(data, event, cpu, timestamp, thread);
		return;
	}
}
static int process_sample_event(struct perf_tool *tool __used,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel __used,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, sample->raw_data, sample->cpu,
			  sample->time, thread);

	return 0;
}
static struct perf_tool perf_kmem = {
	.sample			= process_sample_event,
	.comm			= perf_event__process_comm,
	.ordered_samples	= true,
};
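/* Internal fragmentation, in percent: 100 - 100 * requested / allocated. */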
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}
static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("__print_result: couldn't find kernel information\n");
		return;
	}
	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.102s\n", graph_dotted_line);
}
static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}
static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}
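/*
 * Output sorting: each --sort key maps to a sort_dimension, and the
 * selected dimensions are chained on a list (caller_sort / alloc_sort)
 * in the order given on the command line.
 */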
struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
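/*
 * Insert one alloc_stat into the sorted rbtree.  Keys are compared in
 * list order; the first dimension that differs decides placement, and
 * larger values sort towards the left (i.e. descending output order).
 */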
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
static int __cmd_kmem(void)
{
	int err = -EINVAL;
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &perf_kmem);
	if (session == NULL)
		return -ENOMEM;

	if (perf_session__create_kernel_maps(session) < 0)
		goto out_delete;

	if (!perf_session__has_traces(session, "kmem record"))
		goto out_delete;

	setup_pager();
	err = perf_session__process_events(session, &perf_kmem);
	if (err != 0)
		goto out_delete;
	sort_result();
	print_result(session);
out_delete:
	perf_session__delete(session);
	return err;
}
static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record|stat}",
	NULL
};
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	\
	(int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = malloc(sizeof(*sort));
			if (!sort)
				die("malloc");
			memcpy(sort, avail_sorts[i], sizeof(*sort));
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}
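/* Parse a comma-separated --sort argument into a list of sort dimensions. */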
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);

	if (!str)
		die("strdup");

	while (true) {
		tok = strsep(&str, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}
static int parse_sort_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);

	return 0;
}

static int parse_caller_opt(const struct option *opt __used,
			    const char *arg __used, int unset __used)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __used,
			   const char *arg __used, int unset __used)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}
static const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics",
			   parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics",
			   parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num",
		     "show n lines",
		     parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
};
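/* Default arguments prepended to 'perf record' for 'perf kmem record'. */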
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
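/*
 * Entry point for 'perf kmem'.  Illustrative usage, based on the options
 * defined above:
 *
 *	perf kmem record
 *	perf kmem stat --caller --sort frag,hit,bytes
 *	perf kmem stat --alloc --line 20 --raw-ip
 */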
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	symbol__init();

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strcmp(argv[0], "stat")) {
		setup_cpunode_map();

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		return __cmd_kmem();
	} else
		usage_with_options(kmem_usage, kmem_options);

	return 0;
}