perf: Add 'perf kmem' tool
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / tools / perf / builtin-kmem.c
blobf315b052f81904dd5b13f539959fb63d09e45623
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/data_map.h"

#include <errno.h>
#include <linux/rbtree.h>
18 struct alloc_stat;
19 typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
21 static char const *input_name = "perf.data";
23 static struct perf_header *header;
24 static u64 sample_type;
26 static int alloc_flag;
27 static int caller_flag;
29 sort_fn_t alloc_sort_fn;
30 sort_fn_t caller_sort_fn;
32 static int alloc_lines = -1;
33 static int caller_lines = -1;
35 static char *cwd;
36 static int cwdlen;
38 struct alloc_stat {
39 union {
40 struct {
41 char *name;
42 u64 call_site;
44 u64 ptr;
46 u64 bytes_req;
47 u64 bytes_alloc;
48 u32 hit;
50 struct rb_node node;
53 static struct rb_root root_alloc_stat;
54 static struct rb_root root_alloc_sorted;
55 static struct rb_root root_caller_stat;
56 static struct rb_root root_caller_sorted;
58 static unsigned long total_requested, total_allocated;
60 struct raw_event_sample {
61 u32 size;
62 char data[0];
65 static int
66 process_comm_event(event_t *event, unsigned long offset, unsigned long head)
68 struct thread *thread = threads__findnew(event->comm.pid);
70 dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
71 (void *)(offset + head),
72 (void *)(long)(event->header.size),
73 event->comm.comm, event->comm.pid);
75 if (thread == NULL ||
76 thread__set_comm(thread, event->comm.comm)) {
77 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
78 return -1;
81 return 0;
84 static void insert_alloc_stat(unsigned long ptr,
85 int bytes_req, int bytes_alloc)
87 struct rb_node **node = &root_alloc_stat.rb_node;
88 struct rb_node *parent = NULL;
89 struct alloc_stat *data = NULL;
91 if (!alloc_flag)
92 return;
94 while (*node) {
95 parent = *node;
96 data = rb_entry(*node, struct alloc_stat, node);
98 if (ptr > data->ptr)
99 node = &(*node)->rb_right;
100 else if (ptr < data->ptr)
101 node = &(*node)->rb_left;
102 else
103 break;
106 if (data && data->ptr == ptr) {
107 data->hit++;
108 data->bytes_req += bytes_req;
109 data->bytes_alloc += bytes_req;
110 } else {
111 data = malloc(sizeof(*data));
112 data->ptr = ptr;
113 data->hit = 1;
114 data->bytes_req = bytes_req;
115 data->bytes_alloc = bytes_alloc;
117 rb_link_node(&data->node, parent, node);
118 rb_insert_color(&data->node, &root_alloc_stat);
122 static void insert_caller_stat(unsigned long call_site,
123 int bytes_req, int bytes_alloc)
125 struct rb_node **node = &root_caller_stat.rb_node;
126 struct rb_node *parent = NULL;
127 struct alloc_stat *data = NULL;
129 if (!caller_flag)
130 return;
132 while (*node) {
133 parent = *node;
134 data = rb_entry(*node, struct alloc_stat, node);
136 if (call_site > data->call_site)
137 node = &(*node)->rb_right;
138 else if (call_site < data->call_site)
139 node = &(*node)->rb_left;
140 else
141 break;
144 if (data && data->call_site == call_site) {
145 data->hit++;
146 data->bytes_req += bytes_req;
147 data->bytes_alloc += bytes_req;
148 } else {
149 data = malloc(sizeof(*data));
150 data->call_site = call_site;
151 data->hit = 1;
152 data->bytes_req = bytes_req;
153 data->bytes_alloc = bytes_alloc;
155 rb_link_node(&data->node, parent, node);
156 rb_insert_color(&data->node, &root_caller_stat);
160 static void process_alloc_event(struct raw_event_sample *raw,
161 struct event *event,
162 int cpu __used,
163 u64 timestamp __used,
164 struct thread *thread __used,
165 int node __used)
167 unsigned long call_site;
168 unsigned long ptr;
169 int bytes_req;
170 int bytes_alloc;
172 ptr = raw_field_value(event, "ptr", raw->data);
173 call_site = raw_field_value(event, "call_site", raw->data);
174 bytes_req = raw_field_value(event, "bytes_req", raw->data);
175 bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data);
177 insert_alloc_stat(ptr, bytes_req, bytes_alloc);
178 insert_caller_stat(call_site, bytes_req, bytes_alloc);
180 total_requested += bytes_req;
181 total_allocated += bytes_alloc;
184 static void process_free_event(struct raw_event_sample *raw __used,
185 struct event *event __used,
186 int cpu __used,
187 u64 timestamp __used,
188 struct thread *thread __used)
192 static void
193 process_raw_event(event_t *raw_event __used, void *more_data,
194 int cpu, u64 timestamp, struct thread *thread)
196 struct raw_event_sample *raw = more_data;
197 struct event *event;
198 int type;
200 type = trace_parse_common_type(raw->data);
201 event = trace_find_event(type);
203 if (!strcmp(event->name, "kmalloc") ||
204 !strcmp(event->name, "kmem_cache_alloc")) {
205 process_alloc_event(raw, event, cpu, timestamp, thread, 0);
206 return;
209 if (!strcmp(event->name, "kmalloc_node") ||
210 !strcmp(event->name, "kmem_cache_alloc_node")) {
211 process_alloc_event(raw, event, cpu, timestamp, thread, 1);
212 return;
215 if (!strcmp(event->name, "kfree") ||
216 !strcmp(event->name, "kmem_cache_free")) {
217 process_free_event(raw, event, cpu, timestamp, thread);
218 return;
222 static int
223 process_sample_event(event_t *event, unsigned long offset, unsigned long head)
225 u64 ip = event->ip.ip;
226 u64 timestamp = -1;
227 u32 cpu = -1;
228 u64 period = 1;
229 void *more_data = event->ip.__more_data;
230 struct thread *thread = threads__findnew(event->ip.pid);
232 if (sample_type & PERF_SAMPLE_TIME) {
233 timestamp = *(u64 *)more_data;
234 more_data += sizeof(u64);
237 if (sample_type & PERF_SAMPLE_CPU) {
238 cpu = *(u32 *)more_data;
239 more_data += sizeof(u32);
240 more_data += sizeof(u32); /* reserved */
243 if (sample_type & PERF_SAMPLE_PERIOD) {
244 period = *(u64 *)more_data;
245 more_data += sizeof(u64);
248 dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
249 (void *)(offset + head),
250 (void *)(long)(event->header.size),
251 event->header.misc,
252 event->ip.pid, event->ip.tid,
253 (void *)(long)ip,
254 (long long)period);
256 if (thread == NULL) {
257 pr_debug("problem processing %d event, skipping it.\n",
258 event->header.type);
259 return -1;
262 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
264 process_raw_event(event, more_data, cpu, timestamp, thread);
266 return 0;
269 static int sample_type_check(u64 type)
271 sample_type = type;
273 if (!(sample_type & PERF_SAMPLE_RAW)) {
274 fprintf(stderr,
275 "No trace sample to read. Did you call perf record "
276 "without -R?");
277 return -1;
280 return 0;
283 static struct perf_file_handler file_handler = {
284 .process_sample_event = process_sample_event,
285 .process_comm_event = process_comm_event,
286 .sample_type_check = sample_type_check,
289 static int read_events(void)
291 register_idle_thread();
292 register_perf_file_handler(&file_handler);
294 return mmap_dispatch_perf_file(&header, input_name, 0, 0,
295 &cwdlen, &cwd);
/*
 * Percentage of allocated bytes that the callers never asked for
 * (internal fragmentation).  0.0 when nothing was allocated.
 */
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (!n_alloc)
		return 0.0;

	return 100.0 - (100.0 * n_req / n_alloc);
}
306 static void __print_result(struct rb_root *root, int n_lines, int is_caller)
308 struct rb_node *next;
310 printf("\n ------------------------------------------------------------------------------\n");
311 if (is_caller)
312 printf(" Callsite |");
313 else
314 printf(" Alloc Ptr |");
315 printf(" Total_alloc/Per | Total_req/Per | Hit | Fragmentation\n");
316 printf(" ------------------------------------------------------------------------------\n");
318 next = rb_first(root);
320 while (next && n_lines--) {
321 struct alloc_stat *data;
323 data = rb_entry(next, struct alloc_stat, node);
325 printf(" %-16p | %8llu/%-6lu | %8llu/%-6lu | %6lu | %8.3f%%\n",
326 is_caller ? (void *)(unsigned long)data->call_site :
327 (void *)(unsigned long)data->ptr,
328 (unsigned long long)data->bytes_alloc,
329 (unsigned long)data->bytes_alloc / data->hit,
330 (unsigned long long)data->bytes_req,
331 (unsigned long)data->bytes_req / data->hit,
332 (unsigned long)data->hit,
333 fragmentation(data->bytes_req, data->bytes_alloc));
335 next = rb_next(next);
338 if (n_lines == -1)
339 printf(" ... | ... | ... | ... | ... \n");
341 printf(" ------------------------------------------------------------------------------\n");
344 static void print_summary(void)
346 printf("\nSUMMARY\n=======\n");
347 printf("Total bytes requested: %lu\n", total_requested);
348 printf("Total bytes allocated: %lu\n", total_allocated);
349 printf("Total bytes wasted on internal fragmentation: %lu\n",
350 total_allocated - total_requested);
351 printf("Internal fragmentation: %f%%\n",
352 fragmentation(total_requested, total_allocated));
355 static void print_result(void)
357 if (caller_flag)
358 __print_result(&root_caller_sorted, caller_lines, 1);
359 if (alloc_flag)
360 __print_result(&root_alloc_sorted, alloc_lines, 0);
361 print_summary();
364 static void sort_insert(struct rb_root *root, struct alloc_stat *data,
365 sort_fn_t sort_fn)
367 struct rb_node **new = &(root->rb_node);
368 struct rb_node *parent = NULL;
370 while (*new) {
371 struct alloc_stat *this;
372 int cmp;
374 this = rb_entry(*new, struct alloc_stat, node);
375 parent = *new;
377 cmp = sort_fn(data, this);
379 if (cmp > 0)
380 new = &((*new)->rb_left);
381 else
382 new = &((*new)->rb_right);
385 rb_link_node(&data->node, parent, new);
386 rb_insert_color(&data->node, root);
389 static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
390 sort_fn_t sort_fn)
392 struct rb_node *node;
393 struct alloc_stat *data;
395 for (;;) {
396 node = rb_first(root);
397 if (!node)
398 break;
400 rb_erase(node, root);
401 data = rb_entry(node, struct alloc_stat, node);
402 sort_insert(root_sorted, data, sort_fn);
406 static void sort_result(void)
408 __sort_result(&root_alloc_stat, &root_alloc_sorted, alloc_sort_fn);
409 __sort_result(&root_caller_stat, &root_caller_sorted, caller_sort_fn);
/*
 * Report mode entry point: read the trace, sort, print.
 * Returns 0 on success or the read_events() error code.
 */
static int __cmd_kmem(void)
{
	int err;

	setup_pager();

	/* fix: the read_events() result was silently discarded */
	err = read_events();
	if (err)
		return err;

	sort_result();
	print_result();

	return 0;
}
422 static const char * const kmem_usage[] = {
423 "perf kmem [<options>] {record}",
424 NULL
428 static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
430 if (l->ptr < r->ptr)
431 return -1;
432 else if (l->ptr > r->ptr)
433 return 1;
434 return 0;
437 static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
439 if (l->call_site < r->call_site)
440 return -1;
441 else if (l->call_site > r->call_site)
442 return 1;
443 return 0;
446 static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
448 if (l->bytes_alloc < r->bytes_alloc)
449 return -1;
450 else if (l->bytes_alloc > r->bytes_alloc)
451 return 1;
452 return 0;
455 static int parse_sort_opt(const struct option *opt __used,
456 const char *arg, int unset __used)
458 sort_fn_t sort_fn;
460 if (!arg)
461 return -1;
463 if (strcmp(arg, "ptr") == 0)
464 sort_fn = ptr_cmp;
465 else if (strcmp(arg, "call_site") == 0)
466 sort_fn = callsite_cmp;
467 else if (strcmp(arg, "bytes") == 0)
468 sort_fn = bytes_cmp;
469 else
470 return -1;
472 if (caller_flag > alloc_flag)
473 caller_sort_fn = sort_fn;
474 else
475 alloc_sort_fn = sort_fn;
477 return 0;
480 static int parse_stat_opt(const struct option *opt __used,
481 const char *arg, int unset __used)
483 if (!arg)
484 return -1;
486 if (strcmp(arg, "alloc") == 0)
487 alloc_flag = (caller_flag + 1);
488 else if (strcmp(arg, "caller") == 0)
489 caller_flag = (alloc_flag + 1);
490 else
491 return -1;
492 return 0;
495 static int parse_line_opt(const struct option *opt __used,
496 const char *arg, int unset __used)
498 int lines;
500 if (!arg)
501 return -1;
503 lines = strtoul(arg, NULL, 10);
505 if (caller_flag > alloc_flag)
506 caller_lines = lines;
507 else
508 alloc_lines = lines;
510 return 0;
513 static const struct option kmem_options[] = {
514 OPT_STRING('i', "input", &input_name, "file",
515 "input file name"),
516 OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>",
517 "stat selector, Pass 'alloc' or 'caller'.",
518 parse_stat_opt),
519 OPT_CALLBACK('s', "sort", NULL, "key",
520 "sort by key: ptr, call_site, hit, bytes",
521 parse_sort_opt),
522 OPT_CALLBACK('l', "line", NULL, "num",
523 "show n lins",
524 parse_line_opt),
525 OPT_END()
/*
 * Canned 'perf record' command line used by __cmd_record(): system-wide
 * (-a), raw samples (-R), every event (-c 1), all kmem tracepoints.
 */
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};
543 static int __cmd_record(int argc, const char **argv)
545 unsigned int rec_argc, i, j;
546 const char **rec_argv;
548 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
549 rec_argv = calloc(rec_argc + 1, sizeof(char *));
551 for (i = 0; i < ARRAY_SIZE(record_args); i++)
552 rec_argv[i] = strdup(record_args[i]);
554 for (j = 1; j < (unsigned int)argc; j++, i++)
555 rec_argv[i] = argv[j];
557 return cmd_record(i, rec_argv, NULL);
560 int cmd_kmem(int argc, const char **argv, const char *prefix __used)
562 symbol__init(0);
564 argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);
566 if (argc && !strncmp(argv[0], "rec", 3))
567 return __cmd_record(argc, argv);
568 else if (argc)
569 usage_with_options(kmem_usage, kmem_options);
571 if (!alloc_sort_fn)
572 alloc_sort_fn = bytes_cmp;
573 if (!caller_sort_fn)
574 caller_sort_fn = bytes_cmp;
576 return __cmd_kmem();