added 2.6.29.6 aldebaran kernel
[nao-ulib.git] kernel/2.6.29.6-aldebaran-rt/kernel/trace/trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

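/*
 * The stack tracer hooks every traced function entry through the
 * ftrace framework, measures how deep the current kernel stack is,
 * and, whenever the depth exceeds the previous watermark, records the
 * new maximum together with a backtrace showing where the space went.
 * The results are exported through debugfs and the tracer can be
 * toggled and reset at runtime.
 */
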
#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
        { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

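/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far (terminated by ULONG_MAX), and stack_dump_index[] holds,
 * for each of those frames, how many bytes of stack were in use at
 * that point. The two arrays are parallel: entry i of one describes
 * entry i of the other.
 */
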
static struct stack_trace max_stack_trace = {
        .max_entries            = STACK_TRACE_ENTRIES,
        .entries                = stack_dump_trace,
};

static unsigned long max_stack_size;
static __raw_spinlock_t max_stack_lock = __RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void check_stack(void)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        int i;

        this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(&this_size))
                return;

        local_irq_save(flags);
        __raw_spin_lock(&max_stack_lock);

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

        max_stack_trace.nr_entries      = 0;
        max_stack_trace.skip            = 3;

        save_stack_trace(&max_stack_trace);

        /*
         * Now find where in the stack these are.
         */
        i = 0;
        start = &this_size;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. One of the entries may
         * for some reason be missed on the stack, so we may
         * have to account for them. If they are all there, this
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
        while (i < max_stack_trace.nr_entries) {
                int found = 0;

                stack_dump_index[i] = this_size;
                p = start;

                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (*p == stack_dump_trace[i]) {
                                this_size = stack_dump_index[i++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                        }
                }

                if (!found)
                        i++;
        }

 out:
        __raw_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);
}

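/*
 * A concrete walk-through of the math above, with illustrative
 * numbers: with THREAD_SIZE = 8192 and &this_size sitting at offset
 * 5120 within the thread stack, this_size = 8192 - 5120 = 3072 bytes
 * are in use. If the return address stack_dump_trace[0] is then found
 * 200 words below the top of the stack, its recorded depth becomes
 * 200 * sizeof(unsigned long) = 1600 bytes on a 64-bit kernel.
 */
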
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        int cpu, resched;

        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;

        resched = ftrace_preempt_disable();

        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        check_stack();

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        ftrace_preempt_enable(resched);
}

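/*
 * trace_active acts as a per-cpu recursion counter: this callback runs
 * at the entry of nearly every kernel function, including the ones
 * check_stack() itself calls, so the outermost invocation bumps the
 * counter and any nested invocation bails out immediately.
 * ftrace_preempt_disable()/ftrace_preempt_enable() pin the callback to
 * one cpu without re-entering the scheduler from tracing context.
 */
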
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        char buf[64];
        int ret;

        if (count >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, count))
                return -EFAULT;

        buf[count] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        local_irq_save(flags);
        __raw_spin_lock(&max_stack_lock);
        *ptr = val;
        __raw_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);

        return count;
}

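/*
 * Writing to stack_max_size sets the watermark directly; in practice
 * the main use is resetting it so a new maximum can be captured, e.g.
 * (the path depends on where debugfs is mounted):
 *
 *      echo 0 > /sys/kernel/debug/tracing/stack_max_size
 */
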
static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        long i;

        (*pos)++;

        if (v == SEQ_START_TOKEN)
                i = 0;
        else {
                i = *(long *)v;
                i++;
        }

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return NULL;

        m->private = (void *)i;

        return &m->private;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        void *t = SEQ_START_TOKEN;
        loff_t l = 0;

        local_irq_disable();
        __raw_spin_lock(&max_stack_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (; t && l < *pos; t = t_next(m, t, &l))
                ;

        return t;
}

static void t_stop(struct seq_file *m, void *p)
{
        __raw_spin_unlock(&max_stack_lock);
        local_irq_enable();
}

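/*
 * t_start/t_next/t_stop follow the usual seq_file iterator protocol:
 * t_start is called with a target position and must fast-forward to it
 * (here by replaying t_next), t_next advances one entry at a time, and
 * t_stop is guaranteed to run afterwards. Taking max_stack_lock in
 * t_start and dropping it in t_stop means each chunk of output is read
 * from a consistent snapshot of the recorded maximum.
 */
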
static int trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, addr);

        return seq_printf(m, "%s\n", str);
#else
        return seq_printf(m, "%p\n", (void *)addr);
#endif
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "# Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           max_stack_trace.nr_entries);

                if (!stack_tracer_enabled && !max_stack_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

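/*
 * The resulting file looks roughly like this (values and symbols are
 * illustrative):
 *
 *        Depth    Size   Location    (10 entries)
 *        -----    ----   --------
 *  0)     2520     304   __do_fault+0x3c/0x3e0
 *  1)     2216     160   handle_mm_fault+0x228/0x2f0
 *  ...
 *
 * Depth is the stack usage at that frame; Size is the difference to
 * the next entry, i.e. roughly that function's own stack footprint.
 */
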
static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = seq_open(file, &stack_trace_seq_ops);

        return ret;
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
};

int
stack_trace_sysctl(struct ctl_table *table, int write,
                   struct file *file, void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}

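/*
 * This handler backs /proc/sys/kernel/stack_tracer_enabled, so the
 * tracer can be toggled at runtime:
 *
 *      echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *
 * The mutex serializes concurrent writers, and the ftrace callback is
 * only (un)registered when the value actually changes.
 */
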
static __init int enable_stacktrace(char *str)
{
        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
                                    &max_stack_size, &stack_max_size_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'stack_max_size' entry\n");

        entry = debugfs_create_file("stack_trace", 0444, d_tracer,
                                    NULL, &stack_trace_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'stack_trace' entry\n");

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);
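
/*
 * Typical session (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *      echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *      ... run a workload ...
 *      cat /sys/kernel/debug/tracing/stack_max_size
 *      cat /sys/kernel/debug/tracing/stack_trace
 *      echo 0 > /sys/kernel/debug/tracing/stack_max_size   # reset watermark
 */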