kernel/trace/trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/magic.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif
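
/*
 * stack_dump_trace[] records the call chain that produced the deepest
 * stack seen so far; stack_dump_index[] records the stack depth at each
 * of those entries. Both are updated and read under max_stack_lock.
 */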
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed-in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
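
/*
 * Measure how much of the thread stack is in use at this call site and,
 * if it is a new maximum, record the size and save a stack trace of how
 * we got here, resolving each saved entry against the live stack words.
 */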
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed-in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missing from the stack, so we may
	 * have to account for that. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	BUG_ON(current != &init_task &&
		*(end_of_stack(current)) != STACK_END_MAGIC);
 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
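
/*
 * ftrace callback, invoked on entry to every traced function (registered
 * through trace_ops below). trace_active is bumped per CPU so that any
 * function traced while we are already inside the tracer is ignored.
 */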
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, this variable is only modified from this CPU */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
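
/*
 * read()/write() handlers for the 'stack_max_size' file (typically found
 * under /sys/kernel/debug/tracing/): report the recorded maximum stack
 * size, or reset it by writing a new value.
 */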
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause a circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
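
/*
 * seq_file iterator for the 'stack_trace' file: walks stack_dump_trace[]
 * under max_stack_lock (with trace_active raised so the walk itself is
 * not traced) and prints one line per saved stack frame.
 */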
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
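
/*
 * 'stack_trace_filter' reuses the ftrace filter machinery so the stack
 * tracer can be restricted to a subset of functions.
 */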
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
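
/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled: registers or
 * unregisters the ftrace callback when the value actually changes.
 */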
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
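
/*
 * 'stacktrace' on the kernel command line enables the tracer at boot;
 * 'stacktrace_filter=<funcs>' additionally seeds the function filter.
 */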
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
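
/*
 * Create the tracing control files (typically under
 * /sys/kernel/debug/tracing/) and honor any boot-time settings.
 */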
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);