tools/perf/builtin-kmem.c
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/data_map.h"

#include <linux/rbtree.h>
struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static char const		*input_name = "perf.data";

static u64			sample_type;

static int			alloc_flag;
static int			caller_flag;

static int			alloc_lines = -1;
static int			caller_lines = -1;

static bool			raw_ip;

static char			default_sort_order[] = "frag,hit,bytes";

static int			*cpunode_map;
static int			max_cpu_num;
struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;
	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};
static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

#define PATH_SYS_NODE	"/sys/devices/system/node"
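
/*
 * cpunode_map records, for each possible CPU, the memory node it
 * belongs to, so cross-node allocations can be counted.  The map is
 * sized from /sys/devices/system/cpu/kernel_max, falling back to
 * 4096 entries when that file is unavailable; it is allocated on
 * both paths so setup_cpunode_map() never writes through NULL.
 */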
static void init_cpunode_map(void)
{
	FILE *fp;
	int i;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp) {
		/* No kernel_max file: fall back to a generous upper bound. */
		max_cpu_num = 4096;
	} else {
		if (fscanf(fp, "%d", &max_cpu_num) < 1)
			die("Failed to read 'kernel_max' from sysfs");
		max_cpu_num++;
		fclose(fp);
	}

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map)
		die("calloc");
	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;
}
static void setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	init_cpunode_map();

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return;

	while (true) {
		dent1 = readdir(dir1);
		if (!dent1)
			break;

		if (sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while (true) {
			dent2 = readdir(dir2);
			if (!dent2)
				break;
			if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
}
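
/*
 * Account one allocation in the rb-tree keyed by pointer.  If the
 * address was freed and handed out again, the existing node's hit
 * count and byte totals are bumped; otherwise a fresh node is linked
 * in at the position found by the search loop.
 */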
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			      int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
}
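
/*
 * Same accounting as above, but keyed by call site, so per-caller
 * totals can be reported with --caller.
 */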
static void insert_caller_stat(unsigned long call_site,
			       int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}
}
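
/*
 * Handle one kmalloc/kmem_cache_alloc (node == 0) or *_node (node == 1)
 * tracepoint: update both trees and the global totals, and count
 * allocations satisfied from a different node than the one the
 * requesting CPU belongs to.
 */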
static void process_alloc_event(void *data,
				struct event *event,
				int cpu,
				u64 timestamp __used,
				struct thread *thread __used,
				int node)
{
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req;
	int bytes_alloc;
	int node1, node2;

	ptr = raw_field_value(event, "ptr", data);
	call_site = raw_field_value(event, "call_site", data);
	bytes_req = raw_field_value(event, "bytes_req", data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", data);

	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
	insert_caller_stat(call_site, bytes_req, bytes_alloc);

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	if (node) {
		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
	nr_allocs++;
}
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);
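
/*
 * Look up a node in one of the unsorted trees.  The sort comparators
 * double as lookup predicates: ptr_cmp for the pointer tree,
 * callsite_cmp for the caller tree, with the unused key field zeroed.
 */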
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}
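
/*
 * A free on a different CPU than the matching allocation is a
 * "ping-pong": the object bounced between CPU caches.  Charge it to
 * both the allocation and its call site, then mark the slot free by
 * resetting alloc_cpu.
 */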
static void process_free_event(void *data,
			       struct event *event,
			       int cpu,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
	unsigned long ptr;
	struct alloc_stat *s_alloc, *s_caller;

	ptr = raw_field_value(event, "ptr", data);

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return;

	if (cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		assert(s_caller);
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;
}
static void
process_raw_event(event_t *raw_event __used, void *data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 0);
		return;
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 1);
		return;
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		process_free_event(data, event, cpu, timestamp, thread);
		return;
	}
}
static int process_sample_event(event_t *event)
{
	struct sample_data data;
	struct thread *thread;

	memset(&data, 0, sizeof(data));
	data.time = -1;
	data.cpu = -1;
	data.period = 1;

	event__parse_sample(event, sample_type, &data);

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		    event->header.misc,
		    data.pid, data.tid,
		    (void *)(long)data.ip,
		    (long long)data.period);

	thread = threads__findnew(event->ip.pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, data.raw_data, data.cpu,
			  data.time, thread);

	return 0;
}
static int sample_type_check(u64 type)
{
	sample_type = type;

	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?\n");
		return -1;
	}

	return 0;
}
static struct perf_file_handler file_handler = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= event__process_comm,
	.sample_type_check	= sample_type_check,
};
static int read_events(void)
{
	int err;
	struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);

	if (session == NULL)
		return -ENOMEM;

	register_idle_thread();
	register_perf_file_handler(&file_handler);

	err = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd);
	perf_session__delete(session);
	return err;
}
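
/*
 * Internal fragmentation as a percentage of what the allocator handed
 * out: 100 * (1 - n_req / n_alloc).  0% means every allocated byte
 * was requested; larger values mean more waste.
 */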
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}
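
/*
 * Print one result table.  Each row is a call site (--caller) or an
 * allocation pointer (--alloc): total and per-hit allocated/requested
 * bytes, hit count, ping-pong count, and fragmentation.  A trailing
 * "..." row signals that the --line limit was reached.
 */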
static void __print_result(struct rb_root *root, int n_lines, int is_caller)
{
	struct rb_node *next;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = map_groups__find_function(kmaps, addr, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
				 addr - sym->start);
		else
			snprintf(buf, sizeof(buf), "%#Lx", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %6lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.102s\n", graph_dotted_line);
}
static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}
static void print_result(void)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, alloc_lines, 0);
	print_summary();
}
struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};
static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
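
/*
 * Insert into a sorted tree, comparing on each --sort key in turn
 * until one differs.  Greater elements go left, so an in-order walk
 * (rb_first/rb_next) yields descending order and the biggest
 * offenders print first.
 */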
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}
static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
static int __cmd_kmem(void)
{
	setup_pager();
	read_events();
	sort_result();
	print_result();

	return 0;
}
static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record|stat}",
	NULL
};
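
/*
 * Typical session (a sketch; the exact output depends on the traced
 * workload):
 *
 *   perf kmem record sleep 3	   # record kmem tracepoints system-wide
 *   perf kmem stat --caller \
 *	--sort frag,hit --line 10  # top 10 call sites by fragmentation
 */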
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};
static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	\
	(int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
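
/*
 * Resolve one --sort token against avail_sorts.  Each list gets its
 * own copy of the dimension because the embedded list_head cannot sit
 * on both the caller and alloc sort lists at once.
 */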
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = malloc(sizeof(*sort));
			if (!sort)
				die("malloc");
			memcpy(sort, avail_sorts[i], sizeof(*sort));
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;	/* strsep() advances this; keep str for free() */

	if (!str)
		die("strdup");

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}
static int parse_sort_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);

	return 0;
}
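
/*
 * --caller and --alloc record their relative order on the command
 * line (whichever was seen last holds the larger value), so a later
 * --sort or --line applies to the most recently named view.
 */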
static int parse_caller_opt(const struct option *opt __used,
			    const char *arg __used, int unset __used)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __used,
			   const char *arg __used, int unset __used)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}
static int parse_line_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}
static const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics",
			   parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics",
			   parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num",
		     "show n lines",
		     parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
};
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	symbol__init(0);

	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strcmp(argv[0], "stat")) {
		setup_cpunode_map();

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		return __cmd_kmem();
	}

	return 0;
}