perf: Make common SAMPLE_EVENT parser
tools/perf/builtin-sched.c (blob 45c46c7904934fdc8342861a5de18a04b2e0486d)
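
/*
 * builtin-sched.c - 'perf sched' tool: record, measure and replay
 * scheduler behavior. A quick usage sketch (subcommands as listed in
 * sched_usage below; the workload argument is illustrative):
 *
 *	perf sched record -- sleep 1	# record raw sched_* tracepoints
 *	perf sched latency		# per-task scheduling latencies
 *	perf sched map			# per-CPU context-switch map
 *	perf sched replay		# re-execute the recorded workload
 */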
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/data_map.h"

#include <sys/types.h>
#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

static char const *input_name = "perf.data";

static struct perf_header *header;
static u64 sample_type;

static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;

static int profile_cpu = -1;

#define PR_SET_NAME	15	/* Set process name */
#define MAX_CPUS	4096

static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

#define COMM_LEN	20
#define SYM_LEN		129

#define MAX_PID		65536

static unsigned long nr_tasks;

struct sched_atom;
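
/*
 * Replay model: each recorded task becomes a task_desc carrying an
 * ordered array of sched atoms (run/sleep/wakeup); a dedicated pthread
 * executes the atoms and reports its CPU usage back to the parent.
 */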
struct task_desc {
	unsigned long nr;
	unsigned long pid;
	char comm[COMM_LEN];

	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_atom **atoms;

	pthread_t thread;
	sem_t sleep_sem;

	sem_t ready_for_work;
	sem_t work_done_sem;

	u64 cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type type;
	u64 timestamp;
	u64 duration;
	unsigned long nr;
	int specific_wait;
	sem_t *wait_sem;
	struct task_desc *wakee;
};

static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;
static u64 run_avg;

static unsigned long replay_repeat = 10;
static unsigned long nr_timestamps;
static unsigned long nr_unordered_timestamps;
static unsigned long nr_state_machine_bugs;
static unsigned long nr_context_switch_bugs;
static unsigned long nr_events;
static unsigned long nr_lost_chunks;
static unsigned long nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
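
/*
 * Latency state machine, per work atom: a task sleeps
 * (THREAD_SLEEPING), becomes runnable on wakeup (THREAD_WAIT_CPU) and
 * finally gets the CPU (THREAD_SCHED_IN); the measured latency is
 * sched_in_time - wake_up_time. Atoms with inconsistent timestamps
 * are parked in THREAD_IGNORE.
 */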
enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE,
};

struct work_atom {
	struct list_head list;
	enum thread_state state;
	u64 sched_out_time;
	u64 wake_up_time;
	u64 sched_in_time;
	u64 runtime;
};

struct work_atoms {
	struct list_head work_list;
	struct thread *thread;
	struct rb_node node;
	u64 max_lat;
	u64 total_lat;
	u64 nb_atoms;
	u64 total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	/* split into whole seconds and the sub-second remainder */
	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}
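
/*
 * Calibration: take the minimum over 10 trials of timing an empty
 * burn_nsecs(0) and a 10-usec sleep, so that replayed run/sleep
 * intervals can be corrected for measurement overhead.
 */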
static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}
static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}
static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}
static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}

static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	int ret = 0;
	u64 now;
	long long delta;

	now = get_nsecs();
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	default:
		BUG_ON(1);
	}
}
static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static u64 get_cpu_usage_nsec_self(void)
{
	char filename [] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;
	char *line = NULL;
	u64 total = 0;
	size_t len = 0;
	ssize_t chars;
	FILE *file;
	int ret;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");
	BUG_ON(!file);

	while ((chars = getline(&line, &len, file)) != -1) {
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			     &msecs, &nsecs);
		if (ret == 2) {
			total = msecs*1e6 + nsecs;
			break;
		}
	}
	if (line)
		free(line);
	fclose(file);

	return total;
}
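
/*
 * Each replay thread parks itself twice per iteration: the parent
 * holds start_work_mutex as a start barrier and work_done_wait_mutex
 * as a finish barrier, while the ready_for_work/work_done_sem
 * semaphores tell the parent how many threads have checked in.
 */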
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}
static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}
struct raw_event_sample {
	u32 size;
	char data[0];
};

#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)
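
/*
 * For example, FILL_FIELD(wakeup_event, pid, event, raw->data)
 * expands (roughly) to:
 *
 *	wakeup_event.pid = (typeof(wakeup_event.pid))
 *		raw_field_value(event, "pid", raw->data);
 *
 * i.e. each field is looked up by name in the tracepoint's format
 * description and copied out of the raw sample payload.
 */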
struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_migrate_task_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 cpu;
};

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);

	void (*migrate_task_event)(struct trace_migrate_task_event *,
				   struct event *,
				   int cpu,
				   u64 timestamp,
				   struct thread *thread);
};
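
/*
 * The three analysis modes plug into this vtable: replay_ops rebuilds
 * and re-executes the workload, lat_ops measures wakeup latencies and
 * map_ops draws the per-CPU context-switch map.
 */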
static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}

static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}
static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event	= replay_wakeup_event,
	.switch_event	= replay_switch_event,
	.fork_event	= replay_fork_event,
};

struct sort_dimension {
	const char *name;
	sort_fn_t cmp;
	struct list_head list;
};

static LIST_HEAD(cmp_pid);
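
/*
 * Compare two work_atoms under a chain of sort keys; the first key
 * that differs decides. The same comparator drives both the pid-keyed
 * rb-tree lookups (via cmp_pid) and the final latency sort.
 */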
static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms)
		die("No memory");

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}
static void
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}

__used
static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

static void
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom)
		die("No memory");

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
}
static void
add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat)
		atoms->max_lat = delta;
	atoms->nb_atoms++;
}
static void
latency_switch_event(struct trace_switch_event *switch_event,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid);
	sched_in = threads__findnew(switch_event->next_pid);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events)
			die("out-event: Internal tree error");
	}
	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events)
			die("in-event: Internal tree error");
		/*
		 * Task came in that we have not heard about yet,
		 * add an initial atom in runnable state:
		 */
		add_sched_out_event(in_events, 'R', timestamp);
	}
	add_sched_in_event(in_events, timestamp);
}
static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct event *event __used,
		      int cpu,
		      u64 timestamp,
		      struct thread *this_thread __used)
{
	struct thread *thread = threads__findnew(runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms)
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
}

static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms)
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at only one, so don't
	 * make useless noise.
	 */
	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}
static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
			   struct event *__event __used,
			   int cpu __used,
			   u64 timestamp,
			   struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (profile_cpu == -1)
		return;

	migrant = threads__findnew(migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(migrant);
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
		if (!atoms)
			die("migration-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;
}

static struct trace_sched_handler lat_ops = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
	.migrate_task_event	= latency_migrate_task_event,
};
static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
		(double)work_list->total_runtime / 1e6,
		work_list->nb_atoms, (double)avg / 1e6,
		(double)work_list->max_lat / 1e6);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name	= "pid",
	.cmp	= pid_cmp,
};

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name	= "avg",
	.cmp	= avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name	= "max",
	.cmp	= max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name	= "switch",
	.cmp	= switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name	= "runtime",
	.cmp	= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
static LIST_HEAD(sort_list);

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}
static struct trace_sched_handler *trace_handler;

static void
process_sched_wakeup_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

	FILL_ARRAY(wakeup_event, comm, event, raw->data);
	FILL_FIELD(wakeup_event, pid, event, raw->data);
	FILL_FIELD(wakeup_event, prio, event, raw->data);
	FILL_FIELD(wakeup_event, success, event, raw->data);
	FILL_FIELD(wakeup_event, cpu, event, raw->data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}
/*
 * Track the current task - that way we can know whether there's any
 * weird events, such as a task being switched away that is not current.
 */
static int max_cpu;

static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';
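
/*
 * For the map view each task gets a two-character shortname, cycling
 * A0, B0, ... Z0, A1, ... so up to 260 tasks stay visually distinct
 * in the per-CPU columns.
 */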
static void
map_switch_event(struct trace_switch_event *switch_event,
		 struct event *event __used,
		 int this_cpu,
		 u64 timestamp,
		 struct thread *thread __used)
{
	struct thread *sched_out, *sched_in;
	int new_shortname;
	u64 timestamp0;
	s64 delta;
	int cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid);
	sched_in = threads__findnew(switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1 = 'A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}
}
static void
process_sched_switch_event(struct raw_event_sample *raw,
			   struct event *event,
			   int this_cpu,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, raw->data);

	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
	FILL_FIELD(switch_event, prev_pid, event, raw->data);
	FILL_FIELD(switch_event, prev_prio, event, raw->data);
	FILL_FIELD(switch_event, prev_state, event, raw->data);
	FILL_ARRAY(switch_event, next_comm, event, raw->data);
	FILL_FIELD(switch_event, next_pid, event, raw->data);
	FILL_FIELD(switch_event, next_prio, event, raw->data);

	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;
}

static void
process_sched_runtime_event(struct raw_event_sample *raw,
			    struct event *event,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, raw->data);
	FILL_FIELD(runtime_event, pid, event, raw->data);
	FILL_FIELD(runtime_event, runtime, event, raw->data);
	FILL_FIELD(runtime_event, vruntime, event, raw->data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
}

static void
process_sched_fork_event(struct raw_event_sample *raw,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, raw->data);

	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
	FILL_FIELD(fork_event, parent_pid, event, raw->data);
	FILL_ARRAY(fork_event, child_comm, event, raw->data);
	FILL_FIELD(fork_event, child_pid, event, raw->data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}
static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}

static void
process_sched_migrate_task_event(struct raw_event_sample *raw,
				 struct event *event,
				 int cpu __used,
				 u64 timestamp __used,
				 struct thread *thread __used)
{
	struct trace_migrate_task_event migrate_task_event;

	FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);

	FILL_ARRAY(migrate_task_event, comm, event, raw->data);
	FILL_FIELD(migrate_task_event, pid, event, raw->data);
	FILL_FIELD(migrate_task_event, prio, event, raw->data);
	FILL_FIELD(migrate_task_event, cpu, event, raw->data);

	if (trace_handler->migrate_task_event)
		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
}
static void
process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw = more_data;
	struct event *event;
	int type;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_migrate_task"))
		process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
}
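
/*
 * Sample parsing proper now goes through the common
 * event__parse_sample() helper (see process_sample_event() below);
 * this file only decodes the raw tracepoint payload carried in
 * PERF_SAMPLE_RAW.
 */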
static int process_sample_event(event_t *event)
{
	struct sample_data data;
	struct thread *thread;

	if (!(sample_type & PERF_SAMPLE_RAW))
		return 0;

	memset(&data, 0, sizeof(data));
	data.time = -1;
	data.cpu = -1;
	data.period = -1;

	event__parse_sample(event, sample_type, &data);

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		    event->header.misc,
		    data.pid, data.tid,
		    (void *)(long)data.ip,
		    (long long)data.period);

	thread = threads__findnew(data.pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
		return 0;

	process_raw_event(event, data.raw_data, data.cpu, data.time, thread);

	return 0;
}

static int process_lost_event(event_t *event __used)
{
	nr_lost_chunks++;
	nr_lost_events += event->lost.lost;

	return 0;
}
static int sample_type_check(u64 type)
{
	sample_type = type;

	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?\n");
		return -1;
	}

	return 0;
}

static struct perf_file_handler file_handler = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= event__process_comm,
	.process_lost_event	= process_lost_event,
	.sample_type_check	= sample_type_check,
};

static int read_events(void)
{
	register_idle_thread();
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, 0, 0,
				       &event__cwdlen, &event__cwd);
}
static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}
static void __cmd_lat(void)
{
	struct rb_node *next;

	setup_pager();
	read_events();
	sort_lat();

	printf("\n -----------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms |\n");
	printf(" -----------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events();
	printf("\n");
}

static struct trace_sched_handler map_ops = {
	.wakeup_event	= NULL,
	.switch_event	= map_switch_event,
	.runtime_event	= NULL,
	.fork_event	= NULL,
};

static void __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	read_events();
	print_bad_events();
}

static void __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events();

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized:  %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}
static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|trace}",
	NULL
};

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};
static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add("pid", &cmp_pid);
}
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	symbol__init(0);

	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		__cmd_lat();
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
		setup_sorting();
		__cmd_map();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		__cmd_replay();
	} else if (!strcmp(argv[0], "trace")) {
		/*
		 * Aliased to 'perf trace' for now:
		 */
		return cmd_trace(argc, argv, prefix);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}