tracing/ftrace: change the type of the init() callback
kernel/trace/trace_branch.c
/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>
#include "trace.h"
#ifdef CONFIG_BRANCH_TRACER

static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);
static struct trace_array *branch_tracer;
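
/*
 * Record one branch event into the trace ring buffer: a TRACE_BRANCH
 * entry carrying the function, the file (with the leading path
 * stripped), the line, and whether the branch outcome matched the
 * annotation.  The per-cpu 'disabled' counter keeps this probe from
 * nesting on the same CPU.
 */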
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	unsigned long flags, irq_flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;

	pc = preempt_count();
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_BRANCH;

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(flags);
}
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	if (!branch_tracing_enabled)
		return;

	probe_likely_condition(f, val, expect);
}
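
/*
 * Writers of branch_tracer and branch_tracing_enabled are serialized
 * by branch_tracing_mutex; the probe path above only reads them,
 * ordered by the smp_wmb() below.
 */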
int enable_branch_tracing(struct trace_array *tr)
{
	int ret = 0;

	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * Must be seen before enabling. The reader is a condition
	 * where we do not need a matching rmb()
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return ret;
}
void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);

	if (!branch_tracing_enabled)
		goto out_unlock;

	branch_tracing_enabled--;

 out_unlock:
	mutex_unlock(&branch_tracing_mutex);
}
static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}
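
/*
 * Tracer callbacks.  As of the commit this file was taken from,
 * init() returns an int so a tracer can refuse to register; this
 * tracer always succeeds.
 */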
static int branch_trace_init(struct trace_array *tr)
{
	int cpu;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	start_branch_trace(tr);
	return 0;
}

static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}
struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif
};
__init static int init_branch_trace(void)
{
	return register_tracer(&branch_trace);
}

device_initcall(init_branch_trace);
#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */
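
/*
 * Hook called from the instrumented likely()/unlikely() macros when
 * branch profiling is compiled in: 'val' is what the condition
 * evaluated to, 'expect' is what the annotation predicted.
 */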
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);
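
/*
 * Bounds of an array of ftrace_branch_data records; one instance
 * each for the likely and unlikely profile sections below.
 */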
struct ftrace_pointer {
	void		*start;
	void		*stop;
};
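
/*
 * seq_file iterator over the branch records.  (void *)1 is a
 * sentinel meaning "the header line": t_start() returns it for
 * position 0 and t_show() prints the column headings for it.
 */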
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_pointer *f = m->private;
	struct ftrace_branch_data *p = v;

	(*pos)++;

	if (v == (void *)1)
		return f->start;

	++p;

	if ((void *)p >= (void *)f->stop)
		return NULL;

	return p;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	void *t = (void *)1;
	loff_t l = 0;

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}
static void t_stop(struct seq_file *m, void *p)
{
}
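
/*
 * Print one record: hit/miss counts and the percentage of times the
 * annotation was wrong, i.e. incorrect * 100 / (correct + incorrect).
 */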
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;
	unsigned long percent;

	if (v == (void *)1) {
		seq_printf(m, " correct incorrect  %% "
			      "       Function                "
			      "  File              Line\n"
			      " ------- ---------  - "
			      "       --------                "
			      "  ----              ----\n");
		return 0;
	}

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : 0;

	seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}
static struct seq_operations tracing_likely_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
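
/*
 * i_private holds the ftrace_pointer for this file, stashed there by
 * debugfs_create_file() below; hand it to the seq_file iterator.
 */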
static int tracing_likely_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &tracing_likely_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = (void *)inode->i_private;
	}

	return ret;
}
static struct file_operations tracing_likely_fops = {
	.open		= tracing_likely_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
};
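
/*
 * Section bounds provided by the linker script: between each
 * start/stop pair lies the array of ftrace_branch_data records
 * emitted for the annotated branches.
 */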
extern unsigned long __start_likely_profile[];
extern unsigned long __stop_likely_profile[];
extern unsigned long __start_unlikely_profile[];
extern unsigned long __stop_unlikely_profile[];

static struct ftrace_pointer ftrace_likely_pos = {
	.start		= __start_likely_profile,
	.stop		= __stop_likely_profile,
};

static struct ftrace_pointer ftrace_unlikely_pos = {
	.start		= __start_unlikely_profile,
	.stop		= __stop_unlikely_profile,
};
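
/*
 * Expose the profiles as read-only debugfs files, 'profile_likely'
 * and 'profile_unlikely' (typically under /sys/kernel/debug/tracing/),
 * both served by the same seq_file implementation.
 */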
static __init int ftrace_branch_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("profile_likely", 0444, d_tracer,
				    &ftrace_likely_pos,
				    &tracing_likely_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'profile_likely' entry\n");

	entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
				    &ftrace_unlikely_pos,
				    &tracing_likely_fops);
	if (!entry)
		pr_warning("Could not create debugfs"
			   " 'profile_unlikely' entry\n");

	return 0;
}

device_initcall(ftrace_branch_init);