/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;
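/* func_trace is the trace_array this tracer records into; it is set in function_trace_init() */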
static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;
        tr->cpu = get_cpu();
        tracing_reset_online_cpus(tr);
        put_cpu();
        tracing_start_cmdline_record();
        tracing_start_function_trace();

        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
}

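/*
 * Callback used when the "preempt-only" trace option is set: it only
 * disables preemption around the record instead of disabling interrupts.
 * The per-cpu data->disabled counter keeps nested invocations on the same
 * CPU from recording recursively.
 */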
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu, resched;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
}

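/*
 * Default function entry callback: interrupts (not just preemption) are
 * disabled around the record.
 */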
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

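/*
 * Callback installed when the func_stack_trace option is set: in addition
 * to the function entry it records a stack trace taken at the call site.
 */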
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

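/*
 * Only one of the two ftrace_ops below is registered at any time;
 * tracing_start_function_trace() and func_set_flag() choose between them
 * based on the func_stack_trace option.
 */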
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
};

/* Our two options */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

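/*
 * Pick the callback that matches the current trace options and register it
 * with ftrace. ftrace_function_enabled stays zero while the ops are being
 * set up so that early callback invocations bail out without recording.
 */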
static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                unregister_ftrace_function(&trace_stack_ops);
        else
                unregister_ftrace_function(&trace_ops);
}

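/*
 * Invoked when the func_stack_trace option is toggled (for instance through
 * the trace_options interface); swaps the registered ftrace_ops between the
 * plain and the stack-tracing callback.
 */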
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_FUNC_OPT_STACK) {
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        return 0;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                return 0;
        }

        return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
        .name = "function",
        .init = function_trace_init,
        .reset = function_trace_reset,
        .start = function_trace_start,
        .wait_pipe = poll_wait_pipe,
        .flags = &func_flags,
        .set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_function,
#endif
};

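/*
 * With dynamic ftrace, the traceon/traceoff function probes below can be
 * attached to individual functions through set_ftrace_filter. The probe
 * data pointer doubles as a countdown: -1 means trigger without limit,
 * any other value is decremented on every hit until it reaches zero.
 */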
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (!tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
        .func  = ftrace_traceon,
        .print = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func  = ftrace_traceoff,
        .print = ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        char str[KSYM_SYMBOL_LEN];
        long count = (long)data;

        kallsyms_lookup(ip, NULL, NULL, NULL, str);
        seq_printf(m, "%s:", str);

        if (ops == &traceon_probe_ops)
                seq_printf(m, "traceon");
        else
                seq_printf(m, "traceoff");

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        unregister_ftrace_function_probe_func(glob, ops);

        return 0;
}

static int
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!')
                return ftrace_trace_onoff_unreg(glob+1, cmd, param);

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = strict_strtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}

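/*
 * Example usage (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *      echo 'schedule:traceoff:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * turns the trace buffer off when schedule() is hit, for at most five
 * triggers, and
 *
 *      echo '!schedule:traceoff' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * removes the probe again.
 */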
static struct ftrace_func_command ftrace_traceon_cmd = {
        .name = "traceon",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name = "traceoff",
        .func = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                unregister_ftrace_command(&ftrace_traceoff_cmd);
        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}

device_initcall(init_function_trace);