/*
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#define TRACE_GRAPH_INDENT	2
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
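
/*
 * These bits can be toggled at runtime through the trace_options file
 * in the tracing debugfs directory, e.g.:
 *
 *	echo funcgraph-proc > trace_options	# enable
 *	echo nofuncgraph-proc > trace_options	# disable
 */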
static struct tracer_opt trace_opts[] = {
	/* Display overruns ? */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
	.opts = trace_opts
};
/* pid on the last trace processed */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
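
/*
 * How the return hook works (roughly): on function entry, the arch
 * mcount hook calls ftrace_push_return_trace() to save the real return
 * address on the per-task ret_stack, then rewrites the return address
 * on the stack to point at the arch's return trampoline. When the
 * traced function returns, the trampoline calls
 * ftrace_return_to_handler(), which pops the saved entry, fires the
 * ftrace_graph_return() callback and hands back the original return
 * address to jump to.
 */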
/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long long time,
			 unsigned long func, int *depth)
{
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	*depth = index;

	return 0;
}
/* Retrieve a function return address from the trace stack on thread info. */
void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	barrier();
	current->curr_ret_stack--;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(void)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret);
	trace.rettime = cpu_clock(raw_smp_processor_id());
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
static int graph_trace_init(struct trace_array *tr)
{
	int cpu;
	int ret;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
/* How many decimal digits are needed to print this cpu number? */
static inline int log10_cpu(int nb)
{
	if (nb / 100)
		return 3;
	if (nb / 10)
		return 2;
	return 1;
}
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int i;
	int ret;
	int log10_this = log10_cpu(cpu);
	int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * Tricky - we space the CPU field according to the max
	 * number of online CPUs. On a 2-cpu system it would take
	 * a maximum of 1 digit - on a 128 cpu system it would
	 * take up to 3 digits:
	 */
	for (i = 0; i < log10_all - log10_this; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%d) ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#define TRACE_GRAPH_PROCINFO_LENGTH	14
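
/*
 * The task info column is rendered as "comm-pid" (e.g. "bash-2794"),
 * centered in a TRACE_GRAPH_PROCINFO_LENGTH wide field.
 */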
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	int i;
	int ret;
	int len;
	int spaces = 0;
	char comm[8];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];

	strncpy(comm, trace_find_cmdline(pid), 7);
	comm[7] = '\0';	/* strncpy() does not guarantee termination */
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return TRACE_TYPE_HANDLED;
}
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
	pid_t prev_pid;
	int ret;

	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = last_pid[cpu];
	last_pid[cpu] = pid;

	/*
	 * Context-switch trace line:
	 *
	 * ------------------------------------------
	 * | 1)  migration/0--1  =>  sshd-1755
	 * ------------------------------------------
	 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
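
/*
 * A "leaf" is an entry event immediately followed by its own return
 * event (same pid, same function, nothing in between). Such a pair can
 * be folded into a single "func();" line with a duration, instead of a
 * "func() {" / "}" pair. This peeks at the next event in the ring
 * buffer to detect that case.
 */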
static bool
trace_branch_is_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	if (!ring_iter)
		return false;

	event = ring_buffer_iter_peek(ring_iter, NULL);

	if (!event)
		return false;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return false;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return false;

	return true;
}
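
/*
 * Interrupt entry points live in the .irqentry.text section, delimited
 * by __irqentry_text_start/end. An address inside that range means we
 * crossed an interrupt boundary: "==========>" is printed when entering
 * an irq handler, "<==========" when leaving it.
 */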
static enum print_line_t
print_graph_irq(struct trace_seq *s, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (type == TRACE_GRAPH_ENT) {
		ret = trace_seq_printf(s, "==========> |  ");
	} else {
		/* Cpu */
		if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;

			ret = trace_seq_printf(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* No overhead */
		if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
			ret = trace_seq_printf(s, "  ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		ret = trace_seq_printf(s, "<========== |\n");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
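
/*
 * Durations are tracked in nanoseconds and printed as "usecs.nsecs",
 * e.g. a 15153 ns duration is rendered as "15.153 us", padded to keep
 * the column 7 characters wide.
 */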
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
/* Signal an overhead of time execution in the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* Duration exceeded 100 usecs */
	if (duration > 100000ULL)
		return trace_seq_printf(s, "! ");

	/* Duration exceeded 10 usecs */
	if (duration > 10000ULL)
		return trace_seq_printf(s, "+ ");

	return trace_seq_printf(s, "  ");
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
{
	struct ftrace_graph_ret_entry *ret_entry;
	struct ftrace_graph_ret *graph_ret;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	/* Consume the matching return event peeked by trace_branch_is_leaf() */
	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	ret_entry = ring_buffer_event_data(event);
	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "();\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
			struct trace_seq *s, pid_t pid, int cpu)
{
	int i;
	int ret;
	struct ftrace_graph_ent *call = &entry->graph_ent;

	/* No overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = trace_seq_printf(s, "  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Interrupt */
	ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
	if (ret == TRACE_TYPE_UNHANDLED) {
		/* No time */
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "() {\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, int cpu)
{
	int ret;
	struct trace_entry *ent = iter->ent;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (trace_branch_is_leaf(iter, field))
		return print_graph_entry_leaf(iter, field, s);
	else
		return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, int cpu)
{
	int i;
	int ret;
	unsigned long long duration = trace->rettime - trace->calltime;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
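
/*
 * ftrace_printk() events are shown inline in the call graph as C-style
 * comments at the current depth, so annotations line up with the
 * function they were emitted from.
 */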
static enum print_line_t
print_graph_comment(struct print_entry *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	int i;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, iter->cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = trace_seq_printf(s, "  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No time */
	ret = trace_seq_printf(s, "            |  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Indentation */
	if (trace->depth > 0)
		for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* %s", trace->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (ent->flags & TRACE_FLAG_CONT)
		trace_seq_print_cont(s, iter);

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
		return print_graph_entry(field, s, iter,
					 iter->cpu);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter->cpu);
	}
	case TRACE_PRINT: {
		struct print_entry *field;
		trace_assign_type(field, entry);
		return print_graph_comment(field, s, entry, iter);
	}
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
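
/*
 * With the default flags (CPU + OVERHEAD set, PROC and OVERRUN clear),
 * the output looks roughly like:
 *
 *  1)               |  sys_open() {
 *  1)   0.152 us    |    getname();
 *  1) + 12.846 us   |  }
 */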
static void print_graph_headers(struct seq_file *s)
{
	/* 1st line */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "CPU ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "TASK/PID     ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
		seq_printf(s, "OVERHEAD/");
	seq_printf(s, "DURATION            FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "|   ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "|      |     ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		seq_printf(s, "|        ");
		seq_printf(s, "|                   |   |    |    |\n");
	} else
		seq_printf(s, "    |               |   |    |    |\n");
}
static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
};
static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
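
/*
 * Usage: select the tracer through the tracing debugfs directory, e.g.:
 *
 *	echo function_graph > current_tracer
 *	cat trace
 */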