#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/types.h>
#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>
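/*
 * builtin-sched.c: the guts of 'perf sched'. The tool reads recorded
 * scheduler tracepoints and either reports wakeup-to-schedule latencies
 * ('perf sched latency') or re-creates the recorded workload with real
 * threads and semaphores ('perf sched replay').
 */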
static char const *input_name = "perf.data";
static int input;

static unsigned long page_size;
static unsigned long mmap_window = 32;

static unsigned long total_comm;

static struct rb_root threads;
static struct thread *last_match;

static struct perf_header *header;
static u64 sample_type;

static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;
#define PR_SET_NAME		15	/* Set process name */

#define MAX_CPUS		4096
#define MAX_PID			65536
#define COMM_LEN		20

#define BUG_ON(x)		assert(!(x))

static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

static unsigned long nr_tasks;
struct sched_event;

struct task_desc {
        unsigned long           nr;
        unsigned long           pid;
        char                    comm[COMM_LEN];

        unsigned long           nr_events;
        unsigned long           curr_event;
        struct sched_event      **events;

        pthread_t               thread;
        sem_t                   sleep_sem;
        sem_t                   ready_for_work;
        sem_t                   work_done_sem;

        u64                     cpu_usage;
};
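/*
 * Each recorded task is replayed as a sequence of atoms: RUN burns CPU
 * for a measured duration, SLEEP blocks on a semaphore, and WAKEUP posts
 * the semaphore of the task being woken.
 */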
enum sched_event_type {
        SCHED_EVENT_RUN,
        SCHED_EVENT_SLEEP,
        SCHED_EVENT_WAKEUP,
};

struct sched_event {
        enum sched_event_type   type;
        u64                     timestamp;
        u64                     duration;
        int                     specific_wait;
        sem_t                   *wait_sem;
        struct task_desc        *wakee;
};
static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;
static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;
static u64 run_avg;

static unsigned long replay_repeat = 10;
#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
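/*
 * One character per scheduler state, indexed by prev_state:
 * R (runnable), S (sleeping), D (uninterruptible), T (stopped),
 * t (tracing stop), Z (zombie), X (dead) - see sched_out_state().
 */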
enum thread_state {
        THREAD_SLEEPING = 0,
        THREAD_WAIT_CPU,
        THREAD_SCHED_IN,
        THREAD_IGNORE,
};

struct work_atom {
        struct list_head        list;
        enum thread_state       state;
        u64                     wake_up_time;
        u64                     sched_in_time;
        u64                     runtime;
};

struct task_atoms {
        struct list_head        atom_list;
        struct thread           *thread;
        struct rb_node          node;
        u64                     max_lat;
        u64                     total_lat;
        u64                     nb_atoms;
        u64                     total_runtime;
};
typedef int (*sort_fn_t)(struct task_atoms *, struct task_atoms *);

static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;
static int read_events(void);
static u64 get_nsecs(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);

        return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
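/*
 * Burn CPU for 'nsecs': spin until the elapsed time, corrected by the
 * calibrated per-call overhead, covers the requested duration.
 */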
static void burn_nsecs(u64 nsecs)
{
        u64 T0 = get_nsecs(), T1;

        do {
                T1 = get_nsecs();
        } while (T1 + run_measurement_overhead < T0 + nsecs);
}
static void sleep_nsecs(u64 nsecs)
{
        struct timespec ts;

        ts.tv_nsec = nsecs % 999999999;
        ts.tv_sec  = nsecs / 999999999;

        nanosleep(&ts, NULL);
}
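/*
 * Calibration: time each primitive ten times and keep the smallest
 * delta as the overhead estimate, so replayed run/sleep intervals can
 * be corrected for measurement cost.
 */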
static void calibrate_run_measurement_overhead(void)
{
        u64 T0, T1, delta, min_delta = 1000000000ULL;
        int i;

        for (i = 0; i < 10; i++) {
                T0 = get_nsecs();
                burn_nsecs(0);
                T1 = get_nsecs();
                delta = T1 - T0;
                min_delta = min(min_delta, delta);
        }
        run_measurement_overhead = min_delta;

        printf("run measurement overhead: %Ld nsecs\n", min_delta);
}
static void calibrate_sleep_measurement_overhead(void)
{
        u64 T0, T1, delta, min_delta = 1000000000ULL;
        int i;

        for (i = 0; i < 10; i++) {
                T0 = get_nsecs();
                sleep_nsecs(10000);
                T1 = get_nsecs();
                delta = T1 - T0;
                min_delta = min(min_delta, delta);
        }
        min_delta -= 10000;
        sleep_measurement_overhead = min_delta;

        printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}
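/*
 * Events are appended to a per-task array that is grown by one pointer
 * per event with realloc(); idx is the slot of the new event.
 */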
static struct sched_event *
get_new_event(struct task_desc *task, u64 timestamp)
{
        struct sched_event *event = calloc(1, sizeof(*event));
        unsigned long idx = task->nr_events;
        size_t size;

        event->timestamp = timestamp;

        task->nr_events++;
        size = sizeof(struct sched_event *) * task->nr_events;
        task->events = realloc(task->events, size);
        BUG_ON(!task->events);

        task->events[idx] = event;

        return event;
}
static struct sched_event *last_event(struct task_desc *task)
{
        if (!task->nr_events)
                return NULL;

        return task->events[task->nr_events - 1];
}
static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
        struct sched_event *event, *curr_event = last_event(task);

        /*
         * optimize an existing RUN event by merging this one
         * into it:
         */
        if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
                nr_run_events_optimized++;
                curr_event->duration += duration;
                return;
        }

        event = get_new_event(task, timestamp);

        event->type = SCHED_EVENT_RUN;
        event->duration = duration;

        nr_run_events++;
}
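/*
 * A wakeup is paired with the wakee's most recent SLEEP atom through a
 * shared semaphore; wakeups with no matching sleep (or whose sleep is
 * already targeted) are only counted, not replayed.
 */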
static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
                       struct task_desc *wakee)
{
        struct sched_event *event, *wakee_event;

        event = get_new_event(task, timestamp);
        event->type = SCHED_EVENT_WAKEUP;
        event->wakee = wakee;

        wakee_event = last_event(wakee);
        if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
                targetless_wakeups++;
                return;
        }
        if (wakee_event->wait_sem) {
                multitarget_wakeups++;
                return;
        }

        wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
        sem_init(wakee_event->wait_sem, 0, 0);
        wakee_event->specific_wait = 1;
        event->wait_sem = wakee_event->wait_sem;

        nr_wakeup_events++;
}
static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
                      u64 task_state __used)
{
        struct sched_event *event = get_new_event(task, timestamp);

        event->type = SCHED_EVENT_SLEEP;

        nr_sleep_events++;
}
static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
        struct task_desc *task;

        BUG_ON(pid >= MAX_PID);

        task = pid_to_task[pid];
        if (task)
                return task;

        task = calloc(1, sizeof(*task));
        task->pid = pid;
        task->nr = nr_tasks;
        strcpy(task->comm, comm);
        /*
         * every task starts in sleeping state - this gets ignored
         * if there's no wakeup pointing to this sleep state:
         */
        add_sched_event_sleep(task, 0, 0);

        pid_to_task[pid] = task;
        nr_tasks++;
        tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
        BUG_ON(!tasks);
        tasks[task->nr] = task;

        if (verbose)
                printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

        return task;
}
static void print_task_traces(void)
{
        struct task_desc *task;
        unsigned long i;

        for (i = 0; i < nr_tasks; i++) {
                task = tasks[i];
                printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
                        task->nr, task->comm, task->pid, task->nr_events);
        }
}
static void add_cross_task_wakeups(void)
{
        struct task_desc *task1, *task2;
        unsigned long i, j;

        for (i = 0; i < nr_tasks; i++) {
                task1 = tasks[i];
                j = i + 1;
                if (j == nr_tasks)
                        j = 0;
                task2 = tasks[j];
                add_sched_event_wakeup(task1, 0, task2);
        }
}
static void
process_sched_event(struct task_desc *this_task __used, struct sched_event *event)
{
        u64 now;
        long long delta;
        int ret = 0;

        now = get_nsecs();
        delta = start_time + event->timestamp - now;

        switch (event->type) {
        case SCHED_EVENT_RUN:
                burn_nsecs(event->duration);
                break;
        case SCHED_EVENT_SLEEP:
                if (event->wait_sem)
                        ret = sem_wait(event->wait_sem);
                BUG_ON(ret);
                break;
        case SCHED_EVENT_WAKEUP:
                if (event->wait_sem)
                        ret = sem_post(event->wait_sem);
                BUG_ON(ret);
                break;
        default:
                BUG_ON(1);
        }
}
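/*
 * Two CPU-usage probes: the parent measures itself with getrusage(),
 * while each worker reads se.sum_exec_runtime from /proc/<pid>/sched,
 * which run_one_test() treats as the more accurate of the two.
 */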
static u64 get_cpu_usage_nsec_parent(void)
{
        struct rusage ru;
        u64 sum;
        int err;

        err = getrusage(RUSAGE_SELF, &ru);
        BUG_ON(err);

        sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
        sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

        return sum;
}
static u64 get_cpu_usage_nsec_self(void)
{
        char filename[] = "/proc/1234567890/sched";
        unsigned long msecs, nsecs;
        char *line = NULL;
        u64 total = 0;
        size_t len = 0;
        ssize_t chars;
        FILE *file;
        int ret;

        sprintf(filename, "/proc/%d/sched", getpid());
        file = fopen(filename, "r");
        BUG_ON(!file);

        while ((chars = getline(&line, &len, file)) != -1) {
                ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
                             &msecs, &nsecs);
                if (ret == 2) {
                        total = msecs*1e6 + nsecs;
                        break;
                }
        }
        if (line)
                free(line);
        fclose(file);

        return total;
}
static void *thread_func(void *ctx)
{
        struct task_desc *this_task = ctx;
        u64 cpu_usage_0, cpu_usage_1;
        unsigned long i, ret;
        char comm2[22];

        sprintf(comm2, ":%s", this_task->comm);
        prctl(PR_SET_NAME, comm2);

again:
        ret = sem_post(&this_task->ready_for_work);
        BUG_ON(ret);
        ret = pthread_mutex_lock(&start_work_mutex);
        BUG_ON(ret);
        ret = pthread_mutex_unlock(&start_work_mutex);
        BUG_ON(ret);

        cpu_usage_0 = get_cpu_usage_nsec_self();

        for (i = 0; i < this_task->nr_events; i++) {
                this_task->curr_event = i;
                process_sched_event(this_task, this_task->events[i]);
        }

        cpu_usage_1 = get_cpu_usage_nsec_self();
        this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

        ret = sem_post(&this_task->work_done_sem);
        BUG_ON(ret);

        ret = pthread_mutex_lock(&work_done_wait_mutex);
        BUG_ON(ret);
        ret = pthread_mutex_unlock(&work_done_wait_mutex);
        BUG_ON(ret);

        goto again;
}
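/*
 * create_tasks() takes both mutexes before spawning workers: every
 * thread posts ready_for_work and then blocks on start_work_mutex, so
 * unlocking it in wait_for_tasks() releases all workers at once.
 */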
static void create_tasks(void)
{
        struct task_desc *task;
        pthread_attr_t attr;
        unsigned long i;
        int err;

        err = pthread_attr_init(&attr);
        BUG_ON(err);
        err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
        BUG_ON(err);
        err = pthread_mutex_lock(&start_work_mutex);
        BUG_ON(err);
        err = pthread_mutex_lock(&work_done_wait_mutex);
        BUG_ON(err);
        for (i = 0; i < nr_tasks; i++) {
                task = tasks[i];
                sem_init(&task->sleep_sem, 0, 0);
                sem_init(&task->ready_for_work, 0, 0);
                sem_init(&task->work_done_sem, 0, 0);
                task->curr_event = 0;
                err = pthread_create(&task->thread, &attr, thread_func, task);
                BUG_ON(err);
        }
}
static void wait_for_tasks(void)
{
        u64 cpu_usage_0, cpu_usage_1;
        struct task_desc *task;
        unsigned long i, ret;

        start_time = get_nsecs();
        cpu_usage = 0;
        pthread_mutex_unlock(&work_done_wait_mutex);

        for (i = 0; i < nr_tasks; i++) {
                task = tasks[i];
                ret = sem_wait(&task->ready_for_work);
                BUG_ON(ret);
                sem_init(&task->ready_for_work, 0, 0);
        }
        ret = pthread_mutex_lock(&work_done_wait_mutex);
        BUG_ON(ret);

        cpu_usage_0 = get_cpu_usage_nsec_parent();

        pthread_mutex_unlock(&start_work_mutex);

        for (i = 0; i < nr_tasks; i++) {
                task = tasks[i];
                ret = sem_wait(&task->work_done_sem);
                BUG_ON(ret);
                sem_init(&task->work_done_sem, 0, 0);
                cpu_usage += task->cpu_usage;
        }

        cpu_usage_1 = get_cpu_usage_nsec_parent();
        if (!runavg_cpu_usage)
                runavg_cpu_usage = cpu_usage;
        runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

        parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
        if (!runavg_parent_cpu_usage)
                runavg_parent_cpu_usage = parent_cpu_usage;
        runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
                                   parent_cpu_usage)/10;

        ret = pthread_mutex_lock(&start_work_mutex);
        BUG_ON(ret);

        for (i = 0; i < nr_tasks; i++) {
                task = tasks[i];
                sem_init(&task->sleep_sem, 0, 0);
                task->curr_event = 0;
        }
}
static void run_one_test(void)
{
        u64 T0, T1, delta, avg_delta, fluct, std_dev;

        T0 = get_nsecs();
        wait_for_tasks();
        T1 = get_nsecs();

        delta = T1 - T0;
        sum_runtime += delta;
        nr_runs++;

        avg_delta = sum_runtime / nr_runs;
        if (delta < avg_delta)
                fluct = avg_delta - delta;
        else
                fluct = delta - avg_delta;
        sum_fluct += fluct;
        std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
        if (!run_avg)
                run_avg = delta;
        run_avg = (run_avg*9 + delta)/10;

        printf("#%-3ld: %0.3f, ",
                nr_runs, (double)delta/1000000.0);

        printf("ravg: %0.2f, ",
                (double)run_avg/1e6);

        printf("cpu: %0.2f / %0.2f",
                (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

        /*
         * rusage statistics done by the parent, these are less
         * accurate than the sum_exec_runtime based statistics:
         */
        printf(" [%0.2f / %0.2f]",
                (double)parent_cpu_usage/1e6,
                (double)runavg_parent_cpu_usage/1e6);

        printf("\n");

        if (nr_sleep_corrections)
                printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
        nr_sleep_corrections = 0;
}
static void test_calibrations(void)
{
        u64 T0, T1;

        T0 = get_nsecs();
        burn_nsecs(1e6);
        T1 = get_nsecs();

        printf("the run test took %Ld nsecs\n", T1-T0);

        T0 = get_nsecs();
        sleep_nsecs(1e6);
        T1 = get_nsecs();

        printf("the sleep test took %Ld nsecs\n", T1-T0);
}
static void __cmd_replay(void)
{
        unsigned long i;

        calibrate_run_measurement_overhead();
        calibrate_sleep_measurement_overhead();

        test_calibrations();

        read_events();

        printf("nr_run_events: %ld\n", nr_run_events);
        printf("nr_sleep_events: %ld\n", nr_sleep_events);
        printf("nr_wakeup_events: %ld\n", nr_wakeup_events);

        if (targetless_wakeups)
                printf("target-less wakeups: %ld\n", targetless_wakeups);
        if (multitarget_wakeups)
                printf("multi-target wakeups: %ld\n", multitarget_wakeups);
        if (nr_run_events_optimized)
                printf("run events optimized: %ld\n",
                        nr_run_events_optimized);

        print_task_traces();
        add_cross_task_wakeups();

        create_tasks();
        printf("------------------------------------------------------------\n");
        for (i = 0; i < replay_repeat; i++)
                run_one_test();
}
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
        struct thread *thread;

        thread = threads__findnew(event->comm.pid, &threads, &last_match);

        dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
                (void *)(offset + head),
                (void *)(long)(event->header.size),
                event->comm.comm, event->comm.pid);

        if (thread == NULL ||
            thread__set_comm(thread, event->comm.comm)) {
                dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
                return -1;
        }
        total_comm++;

        return 0;
}
struct raw_event_sample {
        u32 size;
        char data[0];
};

#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while (0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);
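/*
 * Example expansion (illustrative):
 *
 *   FILL_FIELD(wakeup_event, pid, event, raw->data);
 *
 * becomes:
 *
 *   wakeup_event.pid = (typeof(wakeup_event.pid))
 *           raw_field_value(event, "pid", raw->data);
 */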
struct trace_switch_event {
        u32 size;

        u16 common_type;
        u8 common_flags;
        u8 common_preempt_count;
        u32 common_pid;
        u32 common_tgid;

        char prev_comm[16];
        u32 prev_pid;
        u32 prev_prio;
        u64 prev_state;
        char next_comm[16];
        u32 next_pid;
        u32 next_prio;
};

struct trace_wakeup_event {
        u32 size;

        u16 common_type;
        u8 common_flags;
        u8 common_preempt_count;
        u32 common_pid;
        u32 common_tgid;

        char comm[16];
        u32 pid;
        u32 prio;
        u32 success;
        u32 cpu;
};

struct trace_fork_event {
        u32 size;

        u16 common_type;
        u8 common_flags;
        u8 common_preempt_count;
        u32 common_pid;
        u32 common_tgid;

        char parent_comm[16];
        u32 parent_pid;
        char child_comm[16];
        u32 child_pid;
};
struct trace_sched_handler {
        void (*switch_event)(struct trace_switch_event *,
                             struct event *,
                             int cpu,
                             u64 timestamp,
                             struct thread *thread);

        void (*wakeup_event)(struct trace_wakeup_event *,
                             struct event *,
                             int cpu,
                             u64 timestamp,
                             struct thread *thread);

        void (*fork_event)(struct trace_fork_event *,
                           struct event *,
                           int cpu,
                           u64 timestamp,
                           struct thread *thread);
};
static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
                    struct event *event,
                    int cpu __used,
                    u64 timestamp __used,
                    struct thread *thread __used)
{
        struct task_desc *waker, *wakee;

        if (verbose) {
                printf("sched_wakeup event %p\n", event);

                printf(" ... pid %d woke up %s/%d\n",
                        wakeup_event->common_pid,
                        wakeup_event->comm,
                        wakeup_event->pid);
        }

        waker = register_pid(wakeup_event->common_pid, "<unknown>");
        wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

        add_sched_event_wakeup(waker, timestamp, wakee);
}
static u64 cpu_last_switched[MAX_CPUS];
static void
replay_switch_event(struct trace_switch_event *switch_event,
                    struct event *event,
                    int cpu,
                    u64 timestamp,
                    struct thread *thread __used)
{
        struct task_desc *prev, *next;
        u64 timestamp0;
        s64 delta;

        if (verbose)
                printf("sched_switch event %p\n", event);

        if (cpu >= MAX_CPUS || cpu < 0)
                return;

        timestamp0 = cpu_last_switched[cpu];
        if (timestamp0)
                delta = timestamp - timestamp0;
        else
                delta = 0;

        if (delta < 0)
                die("hm, delta: %Ld < 0 ?\n", delta);

        if (verbose) {
                printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
                        switch_event->prev_comm, switch_event->prev_pid,
                        switch_event->next_comm, switch_event->next_pid,
                        delta);
        }

        prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
        next = register_pid(switch_event->next_pid, switch_event->next_comm);

        cpu_last_switched[cpu] = timestamp;

        add_sched_event_run(prev, timestamp, delta);
        add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}
static void
replay_fork_event(struct trace_fork_event *fork_event,
                  struct event *event,
                  int cpu __used,
                  u64 timestamp __used,
                  struct thread *thread __used)
{
        if (verbose) {
                printf("sched_fork event %p\n", event);
                printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
                printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
        }
        register_pid(fork_event->parent_pid, fork_event->parent_comm);
        register_pid(fork_event->child_pid, fork_event->child_comm);
}
static struct trace_sched_handler replay_ops = {
        .wakeup_event           = replay_wakeup_event,
        .switch_event           = replay_switch_event,
        .fork_event             = replay_fork_event,
};
struct sort_dimension {
        const char              *name;
        sort_fn_t               cmp;
        struct list_head        list;
};

static LIST_HEAD(cmp_pid);
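/*
 * Sort keys are chained on a list; thread_lat_cmp() applies them in
 * order, so earlier keys dominate and later ones only break ties.
 */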
static int
thread_lat_cmp(struct list_head *list, struct task_atoms *l, struct task_atoms *r)
{
        struct sort_dimension *sort;
        int ret = 0;

        BUG_ON(list_empty(list));

        list_for_each_entry(sort, list, list) {
                ret = sort->cmp(l, r);
                if (ret)
                        return ret;
        }

        return ret;
}
static struct task_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
                    struct list_head *sort_list)
{
        struct rb_node *node = root->rb_node;
        struct task_atoms key = { .thread = thread };

        while (node) {
                struct task_atoms *atoms;
                int cmp;

                atoms = container_of(node, struct task_atoms, node);

                cmp = thread_lat_cmp(sort_list, &key, atoms);
                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else {
                        BUG_ON(thread != atoms->thread);
                        return atoms;
                }
        }
        return NULL;
}
static void
__thread_latency_insert(struct rb_root *root, struct task_atoms *data,
                        struct list_head *sort_list)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        while (*new) {
                struct task_atoms *this;
                int cmp;

                this = container_of(*new, struct task_atoms, node);
                parent = *new;

                cmp = thread_lat_cmp(sort_list, data, this);

                if (cmp > 0)
                        new = &((*new)->rb_left);
                else
                        new = &((*new)->rb_right);
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color(&data->node, root);
}
static void thread_atoms_insert(struct thread *thread)
{
        struct task_atoms *atoms;

        atoms = calloc(sizeof(*atoms), 1);
        if (!atoms)
                die("No memory");

        atoms->thread = thread;
        INIT_LIST_HEAD(&atoms->atom_list);
        __thread_latency_insert(&atom_root, atoms, &cmp_pid);
}
static void
latency_fork_event(struct trace_fork_event *fork_event __used,
                   struct event *event __used,
                   int cpu __used,
                   u64 timestamp __used,
                   struct thread *thread __used)
{
        /* should insert the newcomer */
}
static char sched_out_state(struct trace_switch_event *switch_event)
{
        const char *str = TASK_STATE_TO_CHAR_STR;

        return str[switch_event->prev_state];
}
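/*
 * 'R' (index 0 in TASK_STATE_TO_CHAR_STR) means the task was still
 * runnable when switched out, i.e. it was preempted and is waiting
 * for a CPU rather than sleeping.
 */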
static void
lat_sched_out(struct task_atoms *atoms,
              struct trace_switch_event *switch_event __used,
              u64 delta, u64 timestamp)
{
        struct work_atom *atom;

        atom = calloc(sizeof(*atom), 1);
        if (!atom)
                die("No memory");

        if (sched_out_state(switch_event) == 'R') {
                atom->state = THREAD_WAIT_CPU;
                atom->wake_up_time = timestamp;
        }

        atom->runtime = delta;
        list_add_tail(&atom->list, &atoms->atom_list);
}
static void
lat_sched_in(struct task_atoms *atoms, u64 timestamp)
{
        struct work_atom *atom;
        u64 delta;

        if (list_empty(&atoms->atom_list))
                return;

        atom = list_entry(atoms->atom_list.prev, struct work_atom, list);

        if (atom->state != THREAD_WAIT_CPU)
                return;

        if (timestamp < atom->wake_up_time) {
                atom->state = THREAD_IGNORE;
                return;
        }

        atom->state = THREAD_SCHED_IN;
        atom->sched_in_time = timestamp;

        delta = atom->sched_in_time - atom->wake_up_time;
        atoms->total_lat += delta;
        if (delta > atoms->max_lat)
                atoms->max_lat = delta;
        atoms->nb_atoms++;
        atoms->total_runtime += atom->runtime;
}
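/*
 * The latency of one atom is sched_in_time - wake_up_time; per-task
 * totals, counts and maxima accumulated here feed the latency table.
 */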
static void
latency_switch_event(struct trace_switch_event *switch_event,
                     struct event *event __used,
                     int cpu,
                     u64 timestamp,
                     struct thread *thread __used)
{
        struct task_atoms *out_atoms, *in_atoms;
        struct thread *sched_out, *sched_in;
        u64 timestamp0;
        s64 delta;

        if (cpu >= MAX_CPUS || cpu < 0)
                return;

        timestamp0 = cpu_last_switched[cpu];
        cpu_last_switched[cpu] = timestamp;
        if (timestamp0)
                delta = timestamp - timestamp0;
        else
                delta = 0;

        if (delta < 0)
                die("hm, delta: %Ld < 0 ?\n", delta);

        sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
        sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

        in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
        if (!in_atoms) {
                thread_atoms_insert(sched_in);
                in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
                if (!in_atoms)
                        die("in-atom: Internal tree error");
        }

        out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
        if (!out_atoms) {
                thread_atoms_insert(sched_out);
                out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
                if (!out_atoms)
                        die("out-atom: Internal tree error");
        }

        lat_sched_in(in_atoms, timestamp);
        lat_sched_out(out_atoms, switch_event, delta, timestamp);
}
static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
                     struct event *event __used,
                     int cpu __used,
                     u64 timestamp,
                     struct thread *thread __used)
{
        struct task_atoms *atoms;
        struct work_atom *atom;
        struct thread *wakee;

        /* Note for later, it may be interesting to observe the failing cases */
        if (!wakeup_event->success)
                return;

        wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
        atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
        if (!atoms) {
                thread_atoms_insert(wakee);
                return;
        }

        if (list_empty(&atoms->atom_list))
                return;

        atom = list_entry(atoms->atom_list.prev, struct work_atom, list);

        if (atom->state != THREAD_SLEEPING)
                return;

        atom->state = THREAD_WAIT_CPU;
        atom->wake_up_time = timestamp;
}
static struct trace_sched_handler lat_ops = {
        .wakeup_event           = latency_wakeup_event,
        .switch_event           = latency_switch_event,
        .fork_event             = latency_fork_event,
};
static void output_lat_thread(struct task_atoms *atom_list)
{
        int i;
        int ret;
        u64 avg;

        if (!atom_list->nb_atoms)
                return;

        all_runtime += atom_list->total_runtime;
        all_count += atom_list->nb_atoms;

        ret = printf(" %s ", atom_list->thread->comm);

        for (i = 0; i < 19 - ret; i++)
                printf(" ");

        avg = atom_list->total_lat / atom_list->nb_atoms;

        printf("|%9.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
                (double)atom_list->total_runtime / 1e6,
                atom_list->nb_atoms, (double)avg / 1e6,
                (double)atom_list->max_lat / 1e6);
}
static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
{
        if (l->thread->pid < r->thread->pid)
                return -1;
        if (l->thread->pid > r->thread->pid)
                return 1;

        return 0;
}

static struct sort_dimension pid_sort_dimension = {
        .name                   = "pid",
        .cmp                    = pid_cmp,
};
static int avg_cmp(struct task_atoms *l, struct task_atoms *r)
{
        u64 avgl, avgr;

        if (!l->nb_atoms)
                return -1;
        if (!r->nb_atoms)
                return 1;

        avgl = l->total_lat / l->nb_atoms;
        avgr = r->total_lat / r->nb_atoms;

        if (avgl < avgr)
                return -1;
        if (avgl > avgr)
                return 1;

        return 0;
}

static struct sort_dimension avg_sort_dimension = {
        .name                   = "avg",
        .cmp                    = avg_cmp,
};
static int max_cmp(struct task_atoms *l, struct task_atoms *r)
{
        if (l->max_lat < r->max_lat)
                return -1;
        if (l->max_lat > r->max_lat)
                return 1;

        return 0;
}

static struct sort_dimension max_sort_dimension = {
        .name                   = "max",
        .cmp                    = max_cmp,
};
static int switch_cmp(struct task_atoms *l, struct task_atoms *r)
{
        if (l->nb_atoms < r->nb_atoms)
                return -1;
        if (l->nb_atoms > r->nb_atoms)
                return 1;

        return 0;
}

static struct sort_dimension switch_sort_dimension = {
        .name                   = "switch",
        .cmp                    = switch_cmp,
};
static int runtime_cmp(struct task_atoms *l, struct task_atoms *r)
{
        if (l->total_runtime < r->total_runtime)
                return -1;
        if (l->total_runtime > r->total_runtime)
                return 1;

        return 0;
}

static struct sort_dimension runtime_sort_dimension = {
        .name                   = "runtime",
        .cmp                    = runtime_cmp,
};
static struct sort_dimension *available_sorts[] = {
        &pid_sort_dimension,
        &avg_sort_dimension,
        &max_sort_dimension,
        &switch_sort_dimension,
        &runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
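/* NB_AVAILABLE_SORTS is the classic ARRAY_SIZE idiom: the element count of available_sorts[]. */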
static LIST_HEAD(sort_list);

static int sort_dimension__add(char *tok, struct list_head *list)
{
        int i;

        for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
                if (!strcmp(available_sorts[i]->name, tok)) {
                        list_add_tail(&available_sorts[i]->list, list);

                        return 0;
                }
        }

        return -1;
}

static void setup_sorting(void);
static void sort_lat(void)
{
        struct rb_node *node;

        for (;;) {
                struct task_atoms *data;
                node = rb_first(&atom_root);
                if (!node)
                        break;

                rb_erase(node, &atom_root);
                data = rb_entry(node, struct task_atoms, node);
                __thread_latency_insert(&sorted_atom_root, data, &sort_list);
        }
}
static void __cmd_lat(void)
{
        struct rb_node *next;

        read_events();
        sort_lat();

        printf("-----------------------------------------------------------------------------------\n");
        printf(" Task              |  Runtime ms | Switches | Average delay ms | Maximum delay ms |\n");
        printf("-----------------------------------------------------------------------------------\n");

        next = rb_first(&sorted_atom_root);

        while (next) {
                struct task_atoms *atom_list;

                atom_list = rb_entry(next, struct task_atoms, node);
                output_lat_thread(atom_list);
                next = rb_next(next);
        }

        printf("-----------------------------------------------------------------------------------\n");
        printf(" TOTAL:            |%9.3f ms |%9Ld |\n",
                (double)all_runtime/1e6, all_count);
        printf("---------------------------------------------\n");
}
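/*
 * Sample of the resulting output (values illustrative):
 *
 * -----------------------------------------------------------------------------------
 *  Task              |  Runtime ms | Switches | Average delay ms | Maximum delay ms |
 * -----------------------------------------------------------------------------------
 *  bash              |   12.345 ms |       42 | avg:    0.123 ms | max:    1.234 ms |
 * -----------------------------------------------------------------------------------
 *  TOTAL:            |   12.345 ms |       42 |
 * ---------------------------------------------
 */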
static struct trace_sched_handler *trace_handler;
static void
process_sched_wakeup_event(struct raw_event_sample *raw,
                           struct event *event,
                           int cpu __used,
                           u64 timestamp __used,
                           struct thread *thread __used)
{
        struct trace_wakeup_event wakeup_event;

        FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

        FILL_ARRAY(wakeup_event, comm, event, raw->data);
        FILL_FIELD(wakeup_event, pid, event, raw->data);
        FILL_FIELD(wakeup_event, prio, event, raw->data);
        FILL_FIELD(wakeup_event, success, event, raw->data);
        FILL_FIELD(wakeup_event, cpu, event, raw->data);

        trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}
static void
process_sched_switch_event(struct raw_event_sample *raw,
                           struct event *event,
                           int cpu __used,
                           u64 timestamp __used,
                           struct thread *thread __used)
{
        struct trace_switch_event switch_event;

        FILL_COMMON_FIELDS(switch_event, event, raw->data);

        FILL_ARRAY(switch_event, prev_comm, event, raw->data);
        FILL_FIELD(switch_event, prev_pid, event, raw->data);
        FILL_FIELD(switch_event, prev_prio, event, raw->data);
        FILL_FIELD(switch_event, prev_state, event, raw->data);
        FILL_ARRAY(switch_event, next_comm, event, raw->data);
        FILL_FIELD(switch_event, next_pid, event, raw->data);
        FILL_FIELD(switch_event, next_prio, event, raw->data);

        trace_handler->switch_event(&switch_event, event, cpu, timestamp, thread);
}
static void
process_sched_fork_event(struct raw_event_sample *raw,
                         struct event *event,
                         int cpu __used,
                         u64 timestamp __used,
                         struct thread *thread __used)
{
        struct trace_fork_event fork_event;

        FILL_COMMON_FIELDS(fork_event, event, raw->data);

        FILL_ARRAY(fork_event, parent_comm, event, raw->data);
        FILL_FIELD(fork_event, parent_pid, event, raw->data);
        FILL_ARRAY(fork_event, child_comm, event, raw->data);
        FILL_FIELD(fork_event, child_pid, event, raw->data);

        trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}
static void
process_sched_exit_event(struct event *event,
                         int cpu __used,
                         u64 timestamp __used,
                         struct thread *thread __used)
{
        if (verbose)
                printf("sched_exit event %p\n", event);
}
static void
process_raw_event(event_t *raw_event __used, void *more_data,
                  int cpu, u64 timestamp, struct thread *thread)
{
        struct raw_event_sample *raw = more_data;
        struct event *event;
        int type;

        type = trace_parse_common_type(raw->data);
        event = trace_find_event(type);

        if (!strcmp(event->name, "sched_switch"))
                process_sched_switch_event(raw, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "sched_wakeup"))
                process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "sched_wakeup_new"))
                process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "sched_process_fork"))
                process_sched_fork_event(raw, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "sched_process_exit"))
                process_sched_exit_event(event, cpu, timestamp, thread);
}
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
        struct dso *dso = NULL;
        struct thread *thread;
        u64 ip = event->ip.ip;
        u64 timestamp = -1;
        u32 cpu = -1;
        u64 period = 1;
        void *more_data = event->ip.__more_data;
        int cpumode;

        thread = threads__findnew(event->ip.pid, &threads, &last_match);

        if (sample_type & PERF_SAMPLE_TIME) {
                timestamp = *(u64 *)more_data;
                more_data += sizeof(u64);
        }

        if (sample_type & PERF_SAMPLE_CPU) {
                cpu = *(u32 *)more_data;
                more_data += sizeof(u32);
                more_data += sizeof(u32); /* reserved */
        }

        if (sample_type & PERF_SAMPLE_PERIOD) {
                period = *(u64 *)more_data;
                more_data += sizeof(u64);
        }

        dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
                (void *)(offset + head),
                (void *)(long)(event->header.size),
                event->header.misc,
                event->ip.pid, event->ip.tid,
                (void *)(long)ip,
                (long long)period);

        if (thread == NULL) {
                eprintf("problem processing %d event, skipping it.\n",
                        event->header.type);
                return -1;
        }

        dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

        cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;

        if (cpumode == PERF_EVENT_MISC_KERNEL) {
                dso = kernel_dso;

                dump_printf(" ...... dso: %s\n", dso->name);

        } else if (cpumode == PERF_EVENT_MISC_USER) {
                /* user-space sample: no dso to resolve here */
        } else {
                dso = hypervisor_dso;

                dump_printf(" ...... dso: [hypervisor]\n");
        }

        if (sample_type & PERF_SAMPLE_RAW)
                process_raw_event(event, more_data, cpu, timestamp, thread);

        return 0;
}
static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
        switch (event->header.type) {
        case PERF_EVENT_MMAP ... PERF_EVENT_LOST:
                return 0;

        case PERF_EVENT_COMM:
                return process_comm_event(event, offset, head);

        case PERF_EVENT_EXIT ... PERF_EVENT_READ:
                return 0;

        case PERF_EVENT_SAMPLE:
                return process_sample_event(event, offset, head);

        case PERF_EVENT_MAX:
        default:
                return -1;
        }

        return 0;
}
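/*
 * read_events() mmap()s the data file in windows of
 * page_size * mmap_window bytes; when an event would cross the end of
 * the window, the mapping is shifted forward by whole pages and parsing
 * resumes at the adjusted head offset.
 */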
static int read_events(void)
{
        int ret, rc = EXIT_FAILURE;
        unsigned long offset = 0;
        unsigned long head = 0;
        struct stat perf_stat;
        event_t *event;
        uint32_t size;
        char *buf;

        register_idle_thread(&threads, &last_match);

        input = open(input_name, O_RDONLY);
        if (input < 0) {
                perror("failed to open file");
                exit(-1);
        }

        ret = fstat(input, &perf_stat);
        if (ret < 0) {
                perror("failed to stat file");
                exit(-1);
        }

        if (!perf_stat.st_size) {
                fprintf(stderr, "zero-sized file, nothing to do!\n");
                exit(0);
        }
        header = perf_header__read(input);
        head = header->data_offset;
        sample_type = perf_header__sample_type(header);

        if (!(sample_type & PERF_SAMPLE_RAW))
                die("No trace sample to read. Did you call perf record "
                    "without -R?");

        if (load_kernel() < 0) {
                perror("failed to load kernel symbols");
                return EXIT_FAILURE;
        }

remap:
        buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
                           MAP_SHARED, input, offset);
        if (buf == MAP_FAILED) {
                perror("failed to mmap file");
                exit(-1);
        }

more:
        event = (event_t *)(buf + head);

        size = event->header.size;
        if (!size)
                size = 8;

        if (head + event->header.size >= page_size * mmap_window) {
                unsigned long shift = page_size * (head / page_size);
                int res;

                res = munmap(buf, page_size * mmap_window);
                assert(res == 0);

                offset += shift;
                head -= shift;
                goto remap;
        }

        size = event->header.size;

        if (!size || process_event(event, offset, head) < 0) {
                /*
                 * assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope to catch on again 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;

        if (offset + head < (unsigned long)perf_stat.st_size)
                goto more;

        rc = EXIT_SUCCESS;
        close(input);

        return rc;
}
static const char * const sched_usage[] = {
        "perf sched [<options>] {record|latency|replay}",
        NULL
};

static const struct option sched_options[] = {
        OPT_BOOLEAN('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
        OPT_END()
};
[] = {
1613 "perf sched latency [<options>]",
1617 static const struct option latency_options
[] = {
1618 OPT_STRING('s', "sort", &sort_order
, "key[,key2...]",
1619 "sort by key(s): runtime, switch, avg, max"),
1620 OPT_BOOLEAN('v', "verbose", &verbose
,
1621 "be more verbose (show symbol address, etc)"),
1622 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace
,
1623 "dump raw trace in ASCII"),
static const char * const replay_usage[] = {
        "perf sched replay [<options>]",
        NULL
};

static const struct option replay_options[] = {
        OPT_INTEGER('r', "repeat", &replay_repeat,
                    "repeat the workload replay N times (-1: infinite)"),
        OPT_BOOLEAN('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
        OPT_END()
};
static void setup_sorting(void)
{
        char *tmp, *tok, *str = strdup(sort_order);

        for (tok = strtok_r(str, ", ", &tmp);
             tok; tok = strtok_r(NULL, ", ", &tmp)) {
                if (sort_dimension__add(tok, &sort_list) < 0) {
                        error("Unknown --sort key: `%s'", tok);
                        usage_with_options(latency_usage, latency_options);
                }
        }

        free(str);

        sort_dimension__add((char *)"pid", &cmp_pid);
}
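/*
 * Subcommands are matched by prefix: "lat..." selects the latency ops,
 * "rep..." the replay ops; anything else prints the usage text.
 */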
int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
        page_size = getpagesize();

        argc = parse_options(argc, argv, sched_options, sched_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc)
                usage_with_options(sched_usage, sched_options);

        if (!strncmp(argv[0], "lat", 3)) {
                trace_handler = &lat_ops;
                if (argc > 1) {
                        argc = parse_options(argc, argv, latency_options, latency_usage, 0);
                        if (argc)
                                usage_with_options(latency_usage, latency_options);
                }
                setup_sorting();
                __cmd_lat();
        } else if (!strncmp(argv[0], "rep", 3)) {
                trace_handler = &replay_ops;
                if (argc) {
                        argc = parse_options(argc, argv, replay_options, replay_usage, 0);
                        if (argc)
                                usage_with_options(replay_usage, replay_options);
                }
                __cmd_replay();
        } else {
                usage_with_options(sched_usage, sched_options);
        }

        return 0;
}