trace: Call tracing_reset_online_cpus before tracer->init()
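Per the subject, the tracing core now calls tracing_reset_online_cpus() before invoking a tracer's ->init() callback. For the function tracer below, this means function_trace_init() no longer needs to reset the ring buffer itself; only the ->start() callback, function_trace_start(), still calls tracing_reset_online_cpus(), to drop stale events when tracing is restarted after a stop.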
kernel/trace/trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;
        tr->cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace();
        return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}
static void function_trace_start(struct trace_array *tr)
{
        /* clear stale per-cpu ring buffer contents when tracing restarts */
        tracing_reset_online_cpus(tr);
}
/*
 * Selected when the TRACE_ITER_PREEMPTONLY flag is set: only disables
 * preemption around the trace, instead of hard-disabling interrupts.
 */
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu, resched;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
}
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
};
/* Our two options */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};
static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}
static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;
        /* OK if they are not registered */
        unregister_ftrace_function(&trace_stack_ops);
        unregister_ftrace_function(&trace_ops);
}
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_FUNC_OPT_STACK) {
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        return 0;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                return 0;
        }

        return -EINVAL;
}
static struct tracer function_trace __read_mostly =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};
static __init int init_function_trace(void)
{
        return register_tracer(&function_trace);
}
device_initcall(init_function_trace);