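/*
 * tools/perf/builtin-kmem.c
 *
 * 'perf kmem' records the kernel's kmem:* allocation/free tracepoints and
 * reports per-callsite and per-pointer statistics: requested vs. allocated
 * bytes, hit counts, cross-CPU "ping-pong" frees and internal fragmentation.
 */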
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/rbtree.h>

struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static char const       *input_name = "perf.data";

static int              alloc_flag;
static int              caller_flag;

static int              alloc_lines = -1;
static int              caller_lines = -1;

static bool             raw_ip;

static char             default_sort_order[] = "frag,hit,bytes";

static int              *cpunode_map;
static int              max_cpu_num;

struct alloc_stat {
        u64     call_site;
        u64     ptr;
        u64     bytes_req;
        u64     bytes_alloc;
        u32     hit;
        u32     pingpong;

        short   alloc_cpu;

        struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

#define PATH_SYS_NODE   "/sys/devices/system/node"

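/*
 * Size the cpu -> NUMA-node table from the kernel's reported CPU limit
 * (falling back to 4096 CPUs if kernel_max cannot be read) and mark every
 * entry as "node unknown" (-1).
 */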
static void init_cpunode_map(void)
{
        FILE *fp;
        int i;

        fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
        if (!fp) {
                max_cpu_num = 4096;
                return;
        }

        if (fscanf(fp, "%d", &max_cpu_num) < 1)
                die("Failed to read 'kernel_max' from sysfs");
        max_cpu_num++;

        cpunode_map = calloc(max_cpu_num, sizeof(int));
        if (!cpunode_map)
                die("calloc");
        for (i = 0; i < max_cpu_num; i++)
                cpunode_map[i] = -1;
        fclose(fp);
}

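/*
 * Walk the nodeN/cpuM entries under /sys/devices/system/node to record which
 * NUMA node each CPU belongs to; used later to spot cross-node allocations.
 */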
static void setup_cpunode_map(void)
{
        struct dirent *dent1, *dent2;
        DIR *dir1, *dir2;
        unsigned int cpu, mem;
        char buf[PATH_MAX];

        init_cpunode_map();

        dir1 = opendir(PATH_SYS_NODE);
        if (!dir1)
                return;

        while ((dent1 = readdir(dir1)) != NULL) {
                if (dent1->d_type != DT_DIR ||
                    sscanf(dent1->d_name, "node%u", &mem) < 1)
                        continue;

                snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
                dir2 = opendir(buf);
                if (!dir2)
                        continue;
                while ((dent2 = readdir(dir2)) != NULL) {
                        if (dent2->d_type != DT_LNK ||
                            sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
                                continue;
                        cpunode_map[cpu] = mem;
                }
                closedir(dir2);
        }
        closedir(dir1);
}

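/*
 * Account one allocation in the rbtree keyed by the returned pointer: bump
 * the byte and hit counters of an existing entry or insert a new one, and
 * remember the allocating CPU for ping-pong detection on free.
 */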
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
                              int bytes_req, int bytes_alloc, int cpu)
{
        struct rb_node **node = &root_alloc_stat.rb_node;
        struct rb_node *parent = NULL;
        struct alloc_stat *data = NULL;

        while (*node) {
                parent = *node;
                data = rb_entry(*node, struct alloc_stat, node);

                if (ptr > data->ptr)
                        node = &(*node)->rb_right;
                else if (ptr < data->ptr)
                        node = &(*node)->rb_left;
                else
                        break;
        }

        if (data && data->ptr == ptr) {
                data->hit++;
                data->bytes_req += bytes_req;
                data->bytes_alloc += bytes_alloc;
        } else {
                data = malloc(sizeof(*data));
                if (!data)
                        die("malloc");
                data->ptr = ptr;
                data->pingpong = 0;
                data->hit = 1;
                data->bytes_req = bytes_req;
                data->bytes_alloc = bytes_alloc;

                rb_link_node(&data->node, parent, node);
                rb_insert_color(&data->node, &root_alloc_stat);
        }
        data->call_site = call_site;
        data->alloc_cpu = cpu;
}

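/*
 * Same accounting as insert_alloc_stat(), but aggregated per call site in a
 * separate rbtree keyed by call_site.
 */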
static void insert_caller_stat(unsigned long call_site,
                               int bytes_req, int bytes_alloc)
{
        struct rb_node **node = &root_caller_stat.rb_node;
        struct rb_node *parent = NULL;
        struct alloc_stat *data = NULL;

        while (*node) {
                parent = *node;
                data = rb_entry(*node, struct alloc_stat, node);

                if (call_site > data->call_site)
                        node = &(*node)->rb_right;
                else if (call_site < data->call_site)
                        node = &(*node)->rb_left;
                else
                        break;
        }

        if (data && data->call_site == call_site) {
                data->hit++;
                data->bytes_req += bytes_req;
                data->bytes_alloc += bytes_alloc;
        } else {
                data = malloc(sizeof(*data));
                if (!data)
                        die("malloc");
                data->call_site = call_site;
                data->pingpong = 0;
                data->hit = 1;
                data->bytes_req = bytes_req;
                data->bytes_alloc = bytes_alloc;

                rb_link_node(&data->node, parent, node);
                rb_insert_color(&data->node, &root_caller_stat);
        }
}

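/*
 * Handle kmalloc/kmem_cache_alloc and their _node variants: extract the raw
 * tracepoint fields, update both rbtrees and the global totals, and count a
 * cross-node allocation when the event's node differs from the allocating
 * CPU's node.
 */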
static void process_alloc_event(void *data,
                                struct event *event,
                                int cpu,
                                u64 timestamp __used,
                                struct thread *thread __used,
                                int node)
{
        unsigned long call_site;
        unsigned long ptr;
        int bytes_req;
        int bytes_alloc;
        int node1, node2;

        ptr = raw_field_value(event, "ptr", data);
        call_site = raw_field_value(event, "call_site", data);
        bytes_req = raw_field_value(event, "bytes_req", data);
        bytes_alloc = raw_field_value(event, "bytes_alloc", data);

        insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
        insert_caller_stat(call_site, bytes_req, bytes_alloc);

        total_requested += bytes_req;
        total_allocated += bytes_alloc;

        if (node) {
                node1 = cpunode_map[cpu];
                node2 = raw_field_value(event, "node", data);
                if (node1 != node2)
                        nr_cross_allocs++;
        }
        nr_allocs++;
}

static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

static struct alloc_stat *search_alloc_stat(unsigned long ptr,
                                            unsigned long call_site,
                                            struct rb_root *root,
                                            sort_fn_t sort_fn)
{
        struct rb_node *node = root->rb_node;
        struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

        while (node) {
                struct alloc_stat *data;
                int cmp;

                data = rb_entry(node, struct alloc_stat, node);

                cmp = sort_fn(&key, data);
                if (cmp < 0)
                        node = node->rb_left;
                else if (cmp > 0)
                        node = node->rb_right;
                else
                        return data;
        }
        return NULL;
}

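/*
 * Handle kfree/kmem_cache_free: look up the pointer being freed and, when it
 * is freed on a CPU other than the one that allocated it, count a ping-pong
 * against both the allocation and its call site.
 */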
static void process_free_event(void *data,
                               struct event *event,
                               int cpu,
                               u64 timestamp __used,
                               struct thread *thread __used)
{
        unsigned long ptr;
        struct alloc_stat *s_alloc, *s_caller;

        ptr = raw_field_value(event, "ptr", data);

        s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
        if (!s_alloc)
                return;

        if (cpu != s_alloc->alloc_cpu) {
                s_alloc->pingpong++;

                s_caller = search_alloc_stat(0, s_alloc->call_site,
                                             &root_caller_stat, callsite_cmp);
                assert(s_caller);
                s_caller->pingpong++;
        }
        s_alloc->alloc_cpu = -1;
}

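/*
 * Dispatch a raw tracepoint record to the allocation or free handler based
 * on the event name; any other event is ignored.
 */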
static void process_raw_event(union perf_event *raw_event __used, void *data,
                              int cpu, u64 timestamp, struct thread *thread)
{
        struct event *event;
        int type;

        type = trace_parse_common_type(data);
        event = trace_find_event(type);

        if (!strcmp(event->name, "kmalloc") ||
            !strcmp(event->name, "kmem_cache_alloc")) {
                process_alloc_event(data, event, cpu, timestamp, thread, 0);
                return;
        }

        if (!strcmp(event->name, "kmalloc_node") ||
            !strcmp(event->name, "kmem_cache_alloc_node")) {
                process_alloc_event(data, event, cpu, timestamp, thread, 1);
                return;
        }

        if (!strcmp(event->name, "kfree") ||
            !strcmp(event->name, "kmem_cache_free")) {
                process_free_event(data, event, cpu, timestamp, thread);
                return;
        }
}

static int process_sample_event(union perf_event *event,
                                struct perf_sample *sample,
                                struct perf_evsel *evsel __used,
                                struct perf_session *session)
{
        struct thread *thread = perf_session__findnew(session, event->ip.pid);

        if (thread == NULL) {
                pr_debug("problem processing %d event, skipping it.\n",
                         event->header.type);
                return -1;
        }

        dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

        process_raw_event(event, sample->raw_data, sample->cpu,
                          sample->time, thread);

        return 0;
}

static struct perf_event_ops event_ops = {
        .sample                 = process_sample_event,
        .comm                   = perf_event__process_comm,
        .ordered_samples        = true,
};

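/*
 * Internal fragmentation in percent: the share of allocated bytes that was
 * never requested, i.e. 100 - 100 * n_req / n_alloc.
 */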
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
        if (n_alloc == 0)
                return 0.0;
        else
                return 100.0 - (100.0 * n_req / n_alloc);
}

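/*
 * Print up to n_lines entries of a sorted rbtree, resolving call sites to
 * kernel symbols unless --raw-ip was given.
 */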
static void __print_result(struct rb_root *root, struct perf_session *session,
                           int n_lines, int is_caller)
{
        struct rb_node *next;
        struct machine *machine;

        printf("%.102s\n", graph_dotted_line);
        printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
        printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
        printf("%.102s\n", graph_dotted_line);

        next = rb_first(root);

        machine = perf_session__find_host_machine(session);
        if (!machine) {
                pr_err("__print_result: couldn't find kernel information\n");
                return;
        }
        while (next && n_lines--) {
                struct alloc_stat *data = rb_entry(next, struct alloc_stat,
                                                   node);
                struct symbol *sym = NULL;
                struct map *map;
                char buf[BUFSIZ];
                u64 addr;

                if (is_caller) {
                        addr = data->call_site;
                        if (!raw_ip)
                                sym = machine__find_kernel_function(machine, addr, &map, NULL);
                } else
                        addr = data->ptr;

                if (sym != NULL)
                        snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
                                 addr - map->unmap_ip(map, sym->start));
                else
                        snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
                printf(" %-34s |", buf);

                printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
                       (unsigned long long)data->bytes_alloc,
                       (unsigned long)data->bytes_alloc / data->hit,
                       (unsigned long long)data->bytes_req,
                       (unsigned long)data->bytes_req / data->hit,
                       (unsigned long)data->hit,
                       (unsigned long)data->pingpong,
                       fragmentation(data->bytes_req, data->bytes_alloc));

                next = rb_next(next);
        }

        if (n_lines == -1)
                printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");

        printf("%.102s\n", graph_dotted_line);
}

static void print_summary(void)
{
        printf("\nSUMMARY\n=======\n");
        printf("Total bytes requested: %lu\n", total_requested);
        printf("Total bytes allocated: %lu\n", total_allocated);
        printf("Total bytes wasted on internal fragmentation: %lu\n",
               total_allocated - total_requested);
        printf("Internal fragmentation: %f%%\n",
               fragmentation(total_requested, total_allocated));
        printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}

static void print_result(struct perf_session *session)
{
        if (caller_flag)
                __print_result(&root_caller_sorted, session, caller_lines, 1);
        if (alloc_flag)
                __print_result(&root_alloc_sorted, session, alloc_lines, 0);
        print_summary();
}

struct sort_dimension {
        const char              name[20];
        sort_fn_t               cmp;
        struct list_head        list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);

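/*
 * Insert an entry into a result rbtree ordered by the requested sort keys,
 * comparing with each key in turn until one of them differs.
 */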
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
                        struct list_head *sort_list)
{
        struct rb_node **new = &(root->rb_node);
        struct rb_node *parent = NULL;
        struct sort_dimension *sort;

        while (*new) {
                struct alloc_stat *this;
                int cmp = 0;

                this = rb_entry(*new, struct alloc_stat, node);
                parent = *new;

                list_for_each_entry(sort, sort_list, list) {
                        cmp = sort->cmp(data, this);
                        if (cmp)
                                break;
                }

                if (cmp > 0)
                        new = &((*new)->rb_left);
                else
                        new = &((*new)->rb_right);
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color(&data->node, root);
}

static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
                          struct list_head *sort_list)
{
        struct rb_node *node;
        struct alloc_stat *data;

        for (;;) {
                node = rb_first(root);
                if (!node)
                        break;

                rb_erase(node, root);
                data = rb_entry(node, struct alloc_stat, node);
                sort_insert(root_sorted, data, sort_list);
        }
}

static void sort_result(void)
{
        __sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
        __sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}

static int __cmd_kmem(void)
{
        int err = -EINVAL;
        struct perf_session *session = perf_session__new(input_name, O_RDONLY,
                                                          0, false, &event_ops);
        if (session == NULL)
                return -ENOMEM;

        if (perf_session__create_kernel_maps(session) < 0)
                goto out_delete;

        if (!perf_session__has_traces(session, "kmem record"))
                goto out_delete;

        setup_pager();
        err = perf_session__process_events(session, &event_ops);
        if (err != 0)
                goto out_delete;
        sort_result();
        print_result(session);
out_delete:
        perf_session__delete(session);
        return err;
}

static const char * const kmem_usage[] = {
        "perf kmem [<options>] {record|stat}",
        NULL
};

static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        if (l->ptr < r->ptr)
                return -1;
        else if (l->ptr > r->ptr)
                return 1;
        return 0;
}

static struct sort_dimension ptr_sort_dimension = {
        .name   = "ptr",
        .cmp    = ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        if (l->call_site < r->call_site)
                return -1;
        else if (l->call_site > r->call_site)
                return 1;
        return 0;
}

static struct sort_dimension callsite_sort_dimension = {
        .name   = "callsite",
        .cmp    = callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        if (l->hit < r->hit)
                return -1;
        else if (l->hit > r->hit)
                return 1;
        return 0;
}

static struct sort_dimension hit_sort_dimension = {
        .name   = "hit",
        .cmp    = hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        if (l->bytes_alloc < r->bytes_alloc)
                return -1;
        else if (l->bytes_alloc > r->bytes_alloc)
                return 1;
        return 0;
}

static struct sort_dimension bytes_sort_dimension = {
        .name   = "bytes",
        .cmp    = bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        double x, y;

        x = fragmentation(l->bytes_req, l->bytes_alloc);
        y = fragmentation(r->bytes_req, r->bytes_alloc);

        if (x < y)
                return -1;
        else if (x > y)
                return 1;
        return 0;
}

static struct sort_dimension frag_sort_dimension = {
        .name   = "frag",
        .cmp    = frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
        if (l->pingpong < r->pingpong)
                return -1;
        else if (l->pingpong > r->pingpong)
                return 1;
        return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
        .name   = "pingpong",
        .cmp    = pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
        &ptr_sort_dimension,
        &callsite_sort_dimension,
        &hit_sort_dimension,
        &bytes_sort_dimension,
        &frag_sort_dimension,
        &pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS \
        (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))

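/*
 * Look up a --sort key by name and append a private copy of the matching
 * dimension to the given sort list; returns -1 for an unknown key.
 */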
static int sort_dimension__add(const char *tok, struct list_head *list)
{
        struct sort_dimension *sort;
        int i;

        for (i = 0; i < NUM_AVAIL_SORTS; i++) {
                if (!strcmp(avail_sorts[i]->name, tok)) {
                        sort = malloc(sizeof(*sort));
                        if (!sort)
                                die("malloc");
                        memcpy(sort, avail_sorts[i], sizeof(*sort));
                        list_add_tail(&sort->list, list);
                        return 0;
                }
        }

        return -1;
}

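/*
 * Parse a comma-separated --sort string (e.g. the default "frag,hit,bytes")
 * into a list of sort dimensions.
 */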
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
        char *tok;
        char *str = strdup(arg);
        char *pos = str;        /* strsep() advances pos; keep str for free() */

        if (!str)
                die("strdup");

        while (true) {
                tok = strsep(&pos, ",");
                if (!tok)
                        break;
                if (sort_dimension__add(tok, sort_list) < 0) {
                        error("Unknown --sort key: '%s'", tok);
                        free(str);
                        return -1;
                }
        }

        free(str);
        return 0;
}

static int parse_sort_opt(const struct option *opt __used,
                          const char *arg, int unset __used)
{
        if (!arg)
                return -1;

        if (caller_flag > alloc_flag)
                return setup_sorting(&caller_sort, arg);
        else
                return setup_sorting(&alloc_sort, arg);

        return 0;
}

static int parse_caller_opt(const struct option *opt __used,
                            const char *arg __used, int unset __used)
{
        caller_flag = (alloc_flag + 1);
        return 0;
}

static int parse_alloc_opt(const struct option *opt __used,
                           const char *arg __used, int unset __used)
{
        alloc_flag = (caller_flag + 1);
        return 0;
}

static int parse_line_opt(const struct option *opt __used,
                          const char *arg, int unset __used)
{
        int lines;

        if (!arg)
                return -1;

        lines = strtoul(arg, NULL, 10);

        if (caller_flag > alloc_flag)
                caller_lines = lines;
        else
                alloc_lines = lines;

        return 0;
}

static const struct option kmem_options[] = {
        OPT_STRING('i', "input", &input_name, "file",
                   "input file name"),
        OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
                           "show per-callsite statistics",
                           parse_caller_opt),
        OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
                           "show per-allocation statistics",
                           parse_alloc_opt),
        OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
                     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag",
                     parse_sort_opt),
        OPT_CALLBACK('l', "line", NULL, "num",
                     "show n lines",
                     parse_line_opt),
        OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
        OPT_END()
};

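/*
 * Fixed argument list handed to 'perf record' by 'perf kmem record':
 * system-wide (-a), raw sample records (-R), one count per event (-c 1),
 * covering all the kmem allocation and free tracepoints.
 */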
static const char *record_args[] = {
        "record",
        "-a",
        "-R",
        "-f",
        "-c", "1",
        "-e", "kmem:kmalloc",
        "-e", "kmem:kmalloc_node",
        "-e", "kmem:kfree",
        "-e", "kmem:kmem_cache_alloc",
        "-e", "kmem:kmem_cache_alloc_node",
        "-e", "kmem:kmem_cache_free",
};

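/*
 * Build the argv for 'perf record' from record_args plus whatever extra
 * options the user passed after "record", then hand it to cmd_record().
 */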
static int __cmd_record(int argc, const char **argv)
{
        unsigned int rec_argc, i, j;
        const char **rec_argv;

        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));

        if (rec_argv == NULL)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);

        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];

        return cmd_record(i, rec_argv, NULL);
}

int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
        argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

        if (!argc)
                usage_with_options(kmem_usage, kmem_options);

        symbol__init();

        if (!strncmp(argv[0], "rec", 3)) {
                return __cmd_record(argc, argv);
        } else if (!strcmp(argv[0], "stat")) {
                setup_cpunode_map();

                if (list_empty(&caller_sort))
                        setup_sorting(&caller_sort, default_sort_order);
                if (list_empty(&alloc_sort))
                        setup_sorting(&alloc_sort, default_sort_order);

                return __cmd_kmem();
        } else
                usage_with_options(kmem_usage, kmem_options);

        return 0;
}