/*
 * kernel/trace/trace_stack.c
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
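
/*
 * The stack tracer records the deepest kernel stack usage seen since it
 * was enabled. A typical session, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   cat /sys/kernel/debug/tracing/stack_max_size
 *   cat /sys/kernel/debug/tracing/stack_trace
 */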
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/magic.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500
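
/*
 * With -mfentry the compiler emits the tracer call before the traced
 * function sets up its own stack frame; with mcount the call happens
 * after the frame is set up. stack_trace_call() compensates differently
 * depending on which scheme is in use.
 */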
#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
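
/*
 * trace_active is a per-cpu recursion guard: the tracer callback
 * increments it on entry and bails out if it was already non-zero on
 * this CPU. It is also raised around max_stack_lock elsewhere in this
 * file so the tracer cannot recurse into the lock.
 */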
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 max_stack_trace.nr_entries - 1);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
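
	/*
	 * Skip the tracer's own frames in the saved trace; when the
	 * ftrace list func is in the call path there is one extra
	 * frame to skip over.
	 */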
	if (using_ftrace_ops_list_func())
		max_stack_trace.skip = 4;
	else
		max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	if (current != &init_task &&
	    *(end_of_stack(current)) != STACK_END_MAGIC) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
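
/*
 * The ftrace_ops that hooks stack_trace_call() into every traced
 * function. RECURSION_SAFE tells ftrace the callback does its own
 * recursion protection (the trace_active counter above).
 */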
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
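
/*
 * seq_file iterator for the stack_trace file. Position 0 produces the
 * header (SEQ_START_TOKEN); __next() maps position n to trace entry
 * n - 1 and stashes the index in m->private.
 */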
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
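
/*
 * __setup() matches by prefix, so both "stacktrace" and
 * "stacktrace_filter=<functions>" reach enable_stacktrace(); in the
 * latter case str points at the "_filter=" remainder, which is saved
 * for ftrace_set_early_filter() at init time.
 */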
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);