/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int			ftrace_function_enabled;

static struct trace_array	*func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

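/*
 * Note: in this callback and the two below, the per-cpu data->disabled
 * counter acts as a recursion guard.  An event is recorded only when the
 * atomic increment brings the counter to 1, i.e. this is the outermost
 * entry on the CPU; nested or re-entrant invocations simply drop out
 * instead of recursing into the ring buffer.
 */
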
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

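/*
 * Only one of the two ftrace_ops above is registered at any time:
 * tracing_start_function_trace() and func_set_flag() below pick
 * trace_stack_ops when the func_stack_trace option is set, and the
 * plain trace_ops otherwise.
 */
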
/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

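/*
 * The func_stack_trace option above is typically toggled through the
 * tracing debugfs trace_options interface while the function tracer is
 * active (e.g. "echo func_stack_trace > trace_options"); that ends up
 * calling func_set_flag() below, which swaps the registered ftrace_ops.
 */
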
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

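/*
 * Typical usage from the tracing debugfs directory (usually
 * /sys/kernel/debug/tracing):
 *
 *   echo function > current_tracer
 *   echo func_stack_trace > trace_options	# optional, needs CONFIG_STACKTRACE
 *   cat trace
 */
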
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

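/*
 * These commands are used through set_ftrace_filter in the tracing
 * debugfs directory, e.g.:
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter	# stop tracing when schedule() is hit
 *   echo 'schedule:traceoff:5' > set_ftrace_filter	# only for the first 5 hits
 *   echo '!schedule:traceoff' > set_ftrace_filter	# remove the probe again
 */
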
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);