/* tools/perf/builtin-lock.c */
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/session.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>
static struct perf_session *session;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS           12
#define LOCKHASH_SIZE           (1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)       hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)      (lockhash_table + __lockhashfn((key)))
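
/*
 * Illustrative only: lock_stat_findnew() below resolves a lockdep_map
 * address to its bucket with the helpers above, roughly:
 *
 *      struct list_head *bucket = lockhashentry(addr);
 *
 *      list_for_each_entry(st, bucket, hash_entry)
 *              if (st->addr == addr)
 *                      return st;
 */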
struct lock_stat {
        struct list_head        hash_entry;
        struct rb_node          rb;             /* used for sorting */

        /*
         * FIXME: raw_field_value() returns unsigned long long,
         * so the address of the lockdep_map should be treated as 64 bit.
         * Is there a better solution?
         */
        void                    *addr;          /* address of lockdep_map, used as ID */
        char                    *name;          /* for strcpy(), we cannot use const */

        unsigned int            nr_acquire;
        unsigned int            nr_acquired;
        unsigned int            nr_contended;
        unsigned int            nr_release;

        unsigned int            nr_readlock;
        unsigned int            nr_trylock;
        /* these times are in nano sec. */
        u64                     wait_time_total;
        u64                     wait_time_min;
        u64                     wait_time_max;

        int                     discard;        /* flag of blacklist */
};
/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first acquire event.
 * By the nature of lock events, there is no guarantee that the first
 * event for a lock is acquire; it can be acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED        0        /* initial state */
#define SEQ_STATE_RELEASED             1
#define SEQ_STATE_ACQUIRING            2
#define SEQ_STATE_ACQUIRED             3
#define SEQ_STATE_READ_ACQUIRED        4
#define SEQ_STATE_CONTENDED            5
/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48
/*
 * struct lock_seq_stat:
 * Holds the state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
        struct list_head        list;
        int                     state;
        u64                     prev_event_time;
        void                    *addr;

        int                     read_count;
};
struct thread_stat {
        struct rb_node          rb;

        u32                     tid;
        struct list_head        seq_list;
};

static struct rb_root thread_stats;
static struct thread_stat *thread_stat_find(u32 tid)
{
        struct rb_node *node;
        struct thread_stat *st;

        node = thread_stats.rb_node;
        while (node) {
                st = container_of(node, struct thread_stat, rb);
                if (st->tid == tid)
                        return st;
                else if (tid < st->tid)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }

        return NULL;
}
static void thread_stat_insert(struct thread_stat *new)
{
        struct rb_node **rb = &thread_stats.rb_node;
        struct rb_node *parent = NULL;
        struct thread_stat *p;

        while (*rb) {
                p = container_of(*rb, struct thread_stat, rb);
                parent = *rb;

                if (new->tid < p->tid)
                        rb = &(*rb)->rb_left;
                else if (new->tid > p->tid)
                        rb = &(*rb)->rb_right;
                else
                        BUG_ON("inserting invalid thread_stat\n");
        }

        rb_link_node(&new->rb, parent, rb);
        rb_insert_color(&new->rb, &thread_stats);
}
static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
        struct thread_stat *st;

        st = thread_stat_find(tid);
        if (st)
                return st;

        st = zalloc(sizeof(struct thread_stat));
        if (!st)
                die("memory allocation failed\n");

        st->tid = tid;
        INIT_LIST_HEAD(&st->seq_list);

        thread_stat_insert(st);

        return st;
}
static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
        thread_stat_findnew_first;

static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
        struct thread_stat *st;

        st = zalloc(sizeof(struct thread_stat));
        if (!st)
                die("memory allocation failed\n");
        st->tid = tid;
        INIT_LIST_HEAD(&st->seq_list);

        rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
        rb_insert_color(&st->rb, &thread_stats);

        thread_stat_findnew = thread_stat_findnew_after_first;
        return st;
}
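
/*
 * Note: thread_stat_findnew starts out pointing at
 * thread_stat_findnew_first(), which links the very first node into the
 * empty rb-tree without searching it, and then switches the pointer to
 * thread_stat_findnew_after_first() for all later lookups.
 */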
/* build a simple key function: returns whether "one" is bigger than "two" */
#define SINGLE_KEY(member)                                              \
        static int lock_stat_key_ ## member(struct lock_stat *one,     \
                                            struct lock_stat *two)     \
        {                                                               \
                return one->member > two->member;                       \
        }

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_min)
SINGLE_KEY(wait_time_max)
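
/*
 * For illustration, SINGLE_KEY(nr_acquired) above expands to:
 *
 *      static int lock_stat_key_nr_acquired(struct lock_stat *one,
 *                                           struct lock_stat *two)
 *      {
 *              return one->nr_acquired > two->nr_acquired;
 *      }
 */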
struct lock_key {
        /*
         * name: the value specified by the user,
         * which should be simpler than the raw member name,
         * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
         */
        const char      *name;
        int             (*key)(struct lock_stat*, struct lock_stat*);
};

static const char *sort_key = "acquired";

static int (*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root result;   /* place to store sorted data */
#define DEF_KEY_LOCK(name, fn_suffix)   \
        { #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
        DEF_KEY_LOCK(acquired, nr_acquired),
        DEF_KEY_LOCK(contended, nr_contended),
        DEF_KEY_LOCK(wait_total, wait_time_total),
        DEF_KEY_LOCK(wait_min, wait_time_min),
        DEF_KEY_LOCK(wait_max, wait_time_max),

        /* more complicated extra comparisons should go here */

        { NULL, NULL }
};
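
/*
 * The table above maps user-facing key names to comparators; for example,
 * "perf lock report -k wait_total" (see report_options below) should sort
 * the report by total wait time.
 */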
static void select_key(void)
{
        int i;

        for (i = 0; keys[i].name; i++) {
                if (!strcmp(keys[i].name, sort_key)) {
                        compare = keys[i].key;
                        return;
                }
        }

        die("Unknown compare key:%s\n", sort_key);
}
static void insert_to_result(struct lock_stat *st,
                             int (*bigger)(struct lock_stat *, struct lock_stat *))
{
        struct rb_node **rb = &result.rb_node;
        struct rb_node *parent = NULL;
        struct lock_stat *p;

        while (*rb) {
                p = container_of(*rb, struct lock_stat, rb);
                parent = *rb;

                if (bigger(st, p))
                        rb = &(*rb)->rb_left;
                else
                        rb = &(*rb)->rb_right;
        }

        rb_link_node(&st->rb, parent, rb);
        rb_insert_color(&st->rb, &result);
}
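
/*
 * Since bigger elements are linked to the left, walking "result" leftmost
 * first (see pop_from_result() below) yields locks in descending order of
 * the selected key.
 */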
/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
        struct rb_node *node = result.rb_node;

        if (!node)
                return NULL;

        while (node->rb_left)
                node = node->rb_left;

        rb_erase(node, &result);
        return container_of(node, struct lock_stat, rb);
}
static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
        struct list_head *entry = lockhashentry(addr);
        struct lock_stat *ret, *new;

        list_for_each_entry(ret, entry, hash_entry) {
                if (ret->addr == addr)
                        return ret;
        }

        new = zalloc(sizeof(struct lock_stat));
        if (!new)
                goto alloc_failed;

        new->addr = addr;
        new->name = zalloc(sizeof(char) * strlen(name) + 1);
        if (!new->name)
                goto alloc_failed;
        strcpy(new->name, name);

        new->wait_time_min = ULLONG_MAX;

        list_add(&new->hash_entry, entry);
        return new;

alloc_failed:
        die("memory allocation failed\n");
}
static const char *input_name = "perf.data";

struct raw_event_sample {
        u32     size;
        char    data[0];
};

struct trace_acquire_event {
        void            *addr;
        const char      *name;
        int             flag;
};

struct trace_acquired_event {
        void            *addr;
        const char      *name;
};

struct trace_contended_event {
        void            *addr;
        const char      *name;
};

struct trace_release_event {
        void            *addr;
        const char      *name;
};
struct trace_lock_handler {
        void (*acquire_event)(struct trace_acquire_event *,
                              struct event *,
                              int cpu,
                              u64 timestamp,
                              struct thread *thread);

        void (*acquired_event)(struct trace_acquired_event *,
                               struct event *,
                               int cpu,
                               u64 timestamp,
                               struct thread *thread);

        void (*contended_event)(struct trace_contended_event *,
                                struct event *,
                                int cpu,
                                u64 timestamp,
                                struct thread *thread);

        void (*release_event)(struct trace_release_event *,
                              struct event *,
                              int cpu,
                              u64 timestamp,
                              struct thread *thread);
};
static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
{
        struct lock_seq_stat *seq;

        list_for_each_entry(seq, &ts->seq_list, list) {
                if (seq->addr == addr)
                        return seq;
        }

        seq = zalloc(sizeof(struct lock_seq_stat));
        if (!seq)
                die("Not enough memory\n");
        seq->state = SEQ_STATE_UNINITIALIZED;
        seq->addr = addr;

        list_add(&seq->list, &ts->seq_list);
        return seq;
}
enum broken_state {
        BROKEN_ACQUIRE,
        BROKEN_ACQUIRED,
        BROKEN_CONTENDED,
        BROKEN_RELEASE,
        BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

enum acquire_flags {
        TRY_LOCK = 1,
        READ_LOCK = 2,
};
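
/*
 * TRY_LOCK and READ_LOCK are assumed to match the flag encoding of the
 * kernel's lock_acquire tracepoint (trylock in bit 0, read acquisition in
 * bit 1); report_lock_acquire_event() below relies on that encoding.
 */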
static void
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
                          struct event *__event __used,
                          int cpu __used,
                          u64 timestamp __used,
                          struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;

        ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, acquire_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
        case SEQ_STATE_RELEASED:
                if (!acquire_event->flag) {
                        seq->state = SEQ_STATE_ACQUIRING;
                } else {
                        if (acquire_event->flag & TRY_LOCK)
                                ls->nr_trylock++;
                        if (acquire_event->flag & READ_LOCK)
                                ls->nr_readlock++;
                        seq->state = SEQ_STATE_READ_ACQUIRED;
                        seq->read_count = 1;
                        ls->nr_acquired++;
                }
                break;
        case SEQ_STATE_READ_ACQUIRED:
                if (acquire_event->flag & READ_LOCK) {
                        seq->read_count++;
                        ls->nr_acquired++;
                        goto end;
                } else {
                        goto broken;
                }
                break;
        case SEQ_STATE_ACQUIRED:
        case SEQ_STATE_ACQUIRING:
        case SEQ_STATE_CONTENDED:
broken:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_ACQUIRE]++;
                list_del(&seq->list);
                free(seq);
                goto end;
                break;
        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        ls->nr_acquire++;
        seq->prev_event_time = timestamp;
end:
        return;
}
static void
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
                           struct event *__event __used,
                           int cpu __used,
                           u64 timestamp __used,
                           struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;
        u64 contended_term;

        ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, acquired_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
                /* orphan event, do nothing */
                return;
        case SEQ_STATE_ACQUIRING:
                break;
        case SEQ_STATE_CONTENDED:
                contended_term = timestamp - seq->prev_event_time;
                ls->wait_time_total += contended_term;
                if (contended_term < ls->wait_time_min)
                        ls->wait_time_min = contended_term;
                if (ls->wait_time_max < contended_term)
                        ls->wait_time_max = contended_term;
                break;
        case SEQ_STATE_RELEASED:
        case SEQ_STATE_ACQUIRED:
        case SEQ_STATE_READ_ACQUIRED:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_ACQUIRED]++;
                list_del(&seq->list);
                free(seq);
                goto end;
                break;
        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        seq->state = SEQ_STATE_ACQUIRED;
        ls->nr_acquired++;
        seq->prev_event_time = timestamp;
end:
        return;
}
static void
report_lock_contended_event(struct trace_contended_event *contended_event,
                            struct event *__event __used,
                            int cpu __used,
                            u64 timestamp __used,
                            struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;

        ls = lock_stat_findnew(contended_event->addr, contended_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, contended_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
                /* orphan event, do nothing */
                return;
        case SEQ_STATE_ACQUIRING:
                break;
        case SEQ_STATE_RELEASED:
        case SEQ_STATE_ACQUIRED:
        case SEQ_STATE_READ_ACQUIRED:
        case SEQ_STATE_CONTENDED:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_CONTENDED]++;
                list_del(&seq->list);
                free(seq);
                goto end;
                break;
        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        seq->state = SEQ_STATE_CONTENDED;
        ls->nr_contended++;
        seq->prev_event_time = timestamp;
end:
        return;
}
static void
report_lock_release_event(struct trace_release_event *release_event,
                          struct event *__event __used,
                          int cpu __used,
                          u64 timestamp __used,
                          struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;

        ls = lock_stat_findnew(release_event->addr, release_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, release_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
                goto end;
                break;
        case SEQ_STATE_ACQUIRED:
                break;
        case SEQ_STATE_READ_ACQUIRED:
                seq->read_count--;
                BUG_ON(seq->read_count < 0);
                if (!seq->read_count) {
                        ls->nr_release++;
                        goto end;
                }
                break;
        case SEQ_STATE_ACQUIRING:
        case SEQ_STATE_CONTENDED:
        case SEQ_STATE_RELEASED:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_RELEASE]++;
                goto free_seq;
                break;
        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        ls->nr_release++;
free_seq:
        list_del(&seq->list);
        free(seq);
end:
        return;
}
/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
        .acquire_event          = report_lock_acquire_event,
        .acquired_event         = report_lock_acquired_event,
        .contended_event        = report_lock_contended_event,
        .release_event          = report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;
static void
process_lock_acquire_event(void *data,
                           struct event *event __used,
                           int cpu __used,
                           u64 timestamp __used,
                           struct thread *thread __used)
{
        struct trace_acquire_event acquire_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&acquire_event.addr, &tmp, sizeof(void *));
        acquire_event.name = (char *)raw_field_ptr(event, "name", data);
        acquire_event.flag = (int)raw_field_value(event, "flag", data);

        if (trace_handler->acquire_event)
                trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
}
static void
process_lock_acquired_event(void *data,
                            struct event *event __used,
                            int cpu __used,
                            u64 timestamp __used,
                            struct thread *thread __used)
{
        struct trace_acquired_event acquired_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&acquired_event.addr, &tmp, sizeof(void *));
        acquired_event.name = (char *)raw_field_ptr(event, "name", data);

        if (trace_handler->acquired_event)
                trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
}
static void
process_lock_contended_event(void *data,
                             struct event *event __used,
                             int cpu __used,
                             u64 timestamp __used,
                             struct thread *thread __used)
{
        struct trace_contended_event contended_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&contended_event.addr, &tmp, sizeof(void *));
        contended_event.name = (char *)raw_field_ptr(event, "name", data);

        if (trace_handler->contended_event)
                trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
}
static void
process_lock_release_event(void *data,
                           struct event *event __used,
                           int cpu __used,
                           u64 timestamp __used,
                           struct thread *thread __used)
{
        struct trace_release_event release_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&release_event.addr, &tmp, sizeof(void *));
        release_event.name = (char *)raw_field_ptr(event, "name", data);

        if (trace_handler->release_event)
                trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
}
static void
process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
{
        struct event *event;
        int type;

        type = trace_parse_common_type(data);
        event = trace_find_event(type);

        if (!strcmp(event->name, "lock_acquire"))
                process_lock_acquire_event(data, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "lock_acquired"))
                process_lock_acquired_event(data, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "lock_contended"))
                process_lock_contended_event(data, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "lock_release"))
                process_lock_release_event(data, event, cpu, timestamp, thread);
}
static void print_bad_events(int bad, int total)
{
        /* Output for debugging, this has to be removed */
        int i;
        const char *name[4] =
                { "acquire", "acquired", "contended", "release" };

        pr_info("\n=== output for debug ===\n\n");
        pr_info("bad: %d, total: %d\n", bad, total);
        pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
        pr_info("histogram of events caused bad sequence\n");
        for (i = 0; i < BROKEN_MAX; i++)
                pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}
/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
        struct lock_stat *st;
        char cut_name[20];
        int bad, total;

        pr_info("%20s ", "Name");
        pr_info("%10s ", "acquired");
        pr_info("%10s ", "contended");

        pr_info("%15s ", "total wait (ns)");
        pr_info("%15s ", "max wait (ns)");
        pr_info("%15s ", "min wait (ns)");

        pr_info("\n\n");

        bad = total = 0;
        while ((st = pop_from_result())) {
                total++;
                if (st->discard) {
                        bad++;
                        continue;
                }
                bzero(cut_name, 20);

                if (strlen(st->name) < 16) {
                        /* output raw name */
                        pr_info("%20s ", st->name);
                } else {
                        strncpy(cut_name, st->name, 16);
                        cut_name[16] = '.';
                        cut_name[17] = '.';
                        cut_name[18] = '.';
                        cut_name[19] = '\0';
                        /* cut the name short to keep the output aligned */
                        pr_info("%20s ", cut_name);
                }

                pr_info("%10u ", st->nr_acquired);
                pr_info("%10u ", st->nr_contended);

                pr_info("%15llu ", st->wait_time_total);
                pr_info("%15llu ", st->wait_time_max);
                pr_info("%15llu ", st->wait_time_min == ULLONG_MAX ?
                        0 : st->wait_time_min);
                pr_info("\n");
        }

        print_bad_events(bad, total);
}
static bool info_threads, info_map;

static void dump_threads(void)
{
        struct thread_stat *st;
        struct rb_node *node;
        struct thread *t;

        pr_info("%10s: comm\n", "Thread ID");

        node = rb_first(&thread_stats);
        while (node) {
                st = container_of(node, struct thread_stat, rb);
                t = perf_session__findnew(session, st->tid);
                pr_info("%10d: %s\n", st->tid, t->comm);
                node = rb_next(node);
        }
}
static void dump_map(void)
{
        unsigned int i;
        struct lock_stat *st;

        pr_info("Address of instance: name of class\n");
        for (i = 0; i < LOCKHASH_SIZE; i++) {
                list_for_each_entry(st, &lockhash_table[i], hash_entry) {
                        pr_info(" %p: %s\n", st->addr, st->name);
                }
        }
}
static void dump_info(void)
{
        if (info_threads)
                dump_threads();
        else if (info_map)
                dump_map();
        else
                die("Unknown type of information\n");
}
static int process_sample_event(event_t *self, struct perf_session *s)
{
        struct sample_data data;
        struct thread *thread;

        bzero(&data, sizeof(data));
        event__parse_sample(self, s->sample_type, &data);

        thread = perf_session__findnew(s, data.tid);
        if (thread == NULL) {
                pr_debug("problem processing %d event, skipping it.\n",
                         self->header.type);
                return -1;
        }

        process_raw_event(data.raw_data, data.cpu, data.time, thread);

        return 0;
}
static struct perf_event_ops eops = {
        .sample                 = process_sample_event,
        .comm                   = event__process_comm,
        .ordered_samples        = true,
};

static int read_events(void)
{
        session = perf_session__new(input_name, O_RDONLY, 0, false);
        if (!session)
                die("Initializing perf session failed\n");

        return perf_session__process_events(session, &eops);
}
static void sort_result(void)
{
        unsigned int i;
        struct lock_stat *st;

        for (i = 0; i < LOCKHASH_SIZE; i++) {
                list_for_each_entry(st, &lockhash_table[i], hash_entry) {
                        insert_to_result(st, compare);
                }
        }
}
static void __cmd_report(void)
{
        setup_pager();
        select_key();
        read_events();
        sort_result();
        print_result();
}
static const char * const report_usage[] = {
        "perf lock report [<options>]",
        NULL
};

static const struct option report_options[] = {
        OPT_STRING('k', "key", &sort_key, "acquired",
                   "key for sorting"),
        /* TODO: type */
        OPT_END()
};

static const char * const info_usage[] = {
        "perf lock info [<options>]",
        NULL
};

static const struct option info_options[] = {
        OPT_BOOLEAN('t', "threads", &info_threads,
                    "dump thread list in perf.data"),
        OPT_BOOLEAN('m', "map", &info_map,
                    "map of lock instances (name:address table)"),
        OPT_END()
};

static const char * const lock_usage[] = {
        "perf lock [<options>] {record|trace|report}",
        NULL
};

static const struct option lock_options[] = {
        OPT_STRING('i', "input", &input_name, "file", "input file name"),
        OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
        OPT_END()
};
static const char *record_args[] = {
        "record",
        "-R",
        "-f",
        "-m", "1024",
        "-c", "1",
        "-e", "lock:lock_acquire:r",
        "-e", "lock:lock_acquired:r",
        "-e", "lock:lock_contended:r",
        "-e", "lock:lock_release:r",
};
static int __cmd_record(int argc, const char **argv)
{
        unsigned int rec_argc, i, j;
        const char **rec_argv;

        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
        if (!rec_argv)
                die("memory allocation failed\n");

        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);

        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];

        BUG_ON(i != rec_argc);

        return cmd_record(i, rec_argv, NULL);
}
int cmd_lock(int argc, const char **argv, const char *prefix __used)
{
        unsigned int i;

        symbol__init();
        for (i = 0; i < LOCKHASH_SIZE; i++)
                INIT_LIST_HEAD(lockhash_table + i);

        argc = parse_options(argc, argv, lock_options, lock_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc)
                usage_with_options(lock_usage, lock_options);

        if (!strncmp(argv[0], "rec", 3)) {
                return __cmd_record(argc, argv);
        } else if (!strncmp(argv[0], "report", 6)) {
                trace_handler = &report_lock_ops;
                if (argc) {
                        argc = parse_options(argc, argv,
                                             report_options, report_usage, 0);
                        if (argc)
                                usage_with_options(report_usage, report_options);
                }
                __cmd_report();
        } else if (!strcmp(argv[0], "trace")) {
                /* Aliased to 'perf trace' */
                return cmd_trace(argc, argv, prefix);
        } else if (!strcmp(argv[0], "info")) {
                if (argc) {
                        argc = parse_options(argc, argv,
                                             info_options, info_usage, 0);
                        if (argc)
                                usage_with_options(info_usage, info_options);
                }
                /* recycling report_lock_ops */
                trace_handler = &report_lock_ops;
                setup_pager();
                read_events();
                dump_info();
        } else {
                usage_with_options(lock_usage, lock_options);
        }

        return 0;
}