/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
/* function tracing enabled */
static int			ftrace_function_enabled;

static struct trace_array	*func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}
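/*
 * The data->disabled counter above also serves as a recursion guard: if
 * this callback is re-entered on the same CPU (for example from an
 * interrupt that itself hits a traced function), atomic_inc_return()
 * returns a value greater than one and the nested hit is dropped, so
 * each CPU writes at most one event at a time.
 */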
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
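/*
 * Unlike the preempt-only variant above, this callback keeps interrupts
 * disabled around the ring buffer write.  Which of the two is used is
 * decided in tracing_start_function_trace() below, based on the
 * TRACE_ITER_PREEMPTONLY trace flag.
 */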
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};
/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}
static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}
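/*
 * Usage sketch (assuming the usual debugfs tracing layout; not part of
 * this file): with the function tracer active, the stack tracing
 * variant is toggled at run time through the option declared in
 * func_opts, e.g.
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 *
 * which reaches func_set_flag() above and swaps trace_ops for
 * trace_stack_ops without stopping the tracer.
 */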
static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
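/*
 * For illustration (assuming the usual debugfs tracing layout; not part
 * of this file): the tracer defined above is selected from user space
 * with
 *
 *	echo function > /sys/kernel/debug/tracing/current_tracer
 *
 * which invokes function_trace_init() and thereby registers the ftrace
 * callbacks defined earlier.
 */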
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}
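/*
 * ftrace_traceon() and ftrace_traceoff() are probe callbacks: they are
 * attached to individual functions through set_ftrace_filter and flip
 * the global tracing switch when those functions are hit.  The count
 * carried in the data pointer limits how many times a probe may fire;
 * -1 means unlimited, as reported by ftrace_trace_onoff_print() below.
 */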
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);
static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}
static int
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
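/*
 * Usage sketch (assuming the usual debugfs tracing layout; not taken
 * from this file): the traceon/traceoff commands registered below are
 * written into set_ftrace_filter, optionally with a count, e.g.
 *
 *	echo 'schedule:traceoff:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo '!schedule:traceoff' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The first arms a probe that turns tracing off the first five times
 * schedule() is hit; the leading '!' in the second removes it again,
 * handled by ftrace_trace_onoff_unreg() via the glob[0] == '!' check.
 */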
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);