/*
 * kernel/trace/trace_stack.c
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */

#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

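/*
 * stack_dump_index runs parallel to stack_dump_trace: for each saved
 * entry it records how deep (in bytes) the stack was at that function,
 * so per-frame sizes fall out as the difference of adjacent entries.
 */
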
/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

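/*
 * Dump the saved max stack trace to the console with pr_emerg().
 * Called from check_stack() when the end of the task's stack has
 * been overwritten.
 */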
static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			"        -----    ----   --------\n",
			max_stack_trace.nr_entries);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

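/*
 * Compare the current stack usage against the deepest stack seen so
 * far and, on a new maximum, save the stack trace and work out the
 * size of each frame by matching the trace entries against the words
 * on the stack. Runs under max_stack_lock with interrupts disabled.
 */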
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[x] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			if (*p == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_dump_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	max_stack_trace.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

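/*
 * ftrace callback, invoked at the start of every traced function.
 * The per-cpu trace_active counter keeps this callback from
 * recursing into itself on the same CPU.
 */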
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

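/*
 * 'stack_max_size' file handlers: read back the deepest stack seen,
 * in bytes, or overwrite it (typically with 0, to reset the record).
 */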
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

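/*
 * seq_file iterator for the 'stack_trace' file. t_start() takes
 * max_stack_lock (bumping trace_active so the tracer itself stays out)
 * and t_stop() drops it, so the snapshot cannot change while it is
 * being printed.
 */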
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

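/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: registers or
 * unregisters the ftrace callback when the value actually changes.
 */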
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

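/*
 * Boot-time handling of the 'stacktrace' parameter; a
 * 'stacktrace_filter=' suffix supplies an early ftrace filter to
 * limit which functions are checked.
 */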
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

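/*
 * Create the tracing control files ('stack_max_size', 'stack_trace',
 * 'stack_trace_filter') and enable the tracer if it was requested on
 * the command line.
 */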
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);