kernel/trace/trace_stack.c

/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"
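
/*
 * Unused slots in stack_dump_trace[] are left at ULONG_MAX; the print
 * and seq_file code below treat that value as an end-of-trace marker.
 */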
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
        .max_entries            = STACK_TRACE_ENTRIES - 1,
        .entries                = &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
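
/*
 * trace_active is a per-CPU recursion counter: it is bumped around every
 * region that takes stack_trace_max_lock so that the stack tracer callback
 * does not re-enter itself (or deadlock on the lock) if those paths are
 * themselves traced.
 */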
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

void stack_trace_print(void)
{
        long i;
        int size;

        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                 "        -----    ----   --------\n",
                 stack_trace_max.nr_entries);

        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ULONG_MAX)
                        break;
                if (i+1 == stack_trace_max.nr_entries ||
                    stack_dump_trace[i+1] == ULONG_MAX)
                        size = stack_trace_index[i];
                else
                        size = stack_trace_index[i] - stack_trace_index[i+1];

                pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
                         size, (void *)stack_dump_trace[i]);
        }
}

/*
 * When arch-specific code overrides this function, the following
 * data should be filled up, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags; unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = ACCESS_ONCE(tracer_frame);
        int i, x;
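
        /*
         * "stack" points at a local variable in the caller, so its offset
         * within the THREAD_SIZE-aligned stack region tells us how deep the
         * stack currently is: the stack grows down from the top of that
         * region, so the bytes between the pointer and the top are in use.
         */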
        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= stack_trace_max_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        /* Can't do this from NMI context (can cause deadlocks) */
        if (in_nmi())
                return;

        local_irq_save(flags);
        arch_spin_lock(&stack_trace_max_lock);

        /*
         * RCU may not be watching, make it see us.
         * The stack trace code uses rcu_sched.
         */
        rcu_irq_enter();

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;

        /* a race could have already updated it */
        if (this_size <= stack_trace_max_size)
                goto out;

        stack_trace_max_size = this_size;

        stack_trace_max.nr_entries = 0;
        stack_trace_max.skip = 3;

        save_stack_trace(&stack_trace_max);

        /* Skip over the overhead of the stack tracer itself */
        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }

        /*
         * Some archs may not have the passed in ip in the dump.
         * If that happens, we need to show everything.
         */
        if (i == stack_trace_max.nr_entries)
                i = 0;

        /*
         * Now find where in the stack these are.
         */
        x = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. One of the entries may
         * for some reason be missed on the stack, so we may
         * have to account for them. If they are all there, this
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
        while (i < stack_trace_max.nr_entries) {
                int found = 0;

                stack_trace_index[x] = this_size;
                p = start;

                for (; p < top && i < stack_trace_max.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
                        /*
                         * The READ_ONCE_NOCHECK is used to let KASAN know that
                         * this is not a stack-out-of-bounds error.
                         */
                        if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
                                this_size = stack_trace_index[x++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame)) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        stack_trace_max_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }

        stack_trace_max.nr_entries = x;
        for (; x < i; x++)
                stack_dump_trace[x] = ULONG_MAX;

        if (task_stack_end_corrupted(current)) {
                stack_trace_print();
                BUG();
        }

 out:
        rcu_irq_exit();
        arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
}
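
/*
 * Ftrace callback: runs on every traced function entry.  The address of
 * the on-stack local "stack" is handed to check_stack() as a cheap
 * approximation of the current stack pointer, and the per-CPU
 * trace_active counter keeps the callback from recursing into itself.
 */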
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;
        int cpu;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * In case we trace inside arch_spin_lock() or after (NMI),
         * we will cause circular lock, so we also need to increase
         * the percpu trace_active here.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&stack_trace_max_lock);
        *ptr = val;
        arch_spin_unlock(&stack_trace_max_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}

static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
        .llseek         = default_llseek,
};
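
/*
 * seq_file iterators for the "stack_trace" file.  t_start() takes
 * stack_trace_max_lock (with the per-CPU trace_active counter bumped so
 * the tracer cannot recurse on it) and t_stop() releases it, so the trace
 * is read out as a consistent snapshot.
 */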
static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&stack_trace_max_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&stack_trace_max_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}
static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           stack_trace_max.nr_entries);

                if (!stack_tracer_enabled && !stack_trace_max_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= stack_trace_max.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == stack_trace_max.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_trace_index[i];
        else
                size = stack_trace_index[i] - stack_trace_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = tracing_lseek,
        .release = ftrace_regex_release,
};
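
/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: registers or
 * unregisters the ftrace callback when the value actually changes.
 */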
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}
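
/*
 * "stacktrace" on the kernel command line turns the tracer on at boot;
 * "stacktrace_filter=<funcs>" additionally saves a filter string that is
 * applied once ftrace is ready, in stack_trace_init().
 */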
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);
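
/*
 * Create the tracefs control files and, if the tracer was enabled on the
 * command line, register the callback right away.
 */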
static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("stack_max_size", 0644, d_tracer,
                          &stack_trace_max_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                          NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                          NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);