/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

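/* debugfs read handler for stack_max_size: report the deepest stack seen */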
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);

	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

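/* debugfs write handler for stack_max_size: set the recorded maximum */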
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

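/*
 * seq_file iterator for the stack_trace debugfs file.  The entries are
 * walked under max_stack_lock with trace_active raised, so the stack
 * tracer does not recurse while the trace is being printed.
 */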
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

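/* "stacktrace" on the kernel command line enables the tracer at boot */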
static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

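/* Create the debugfs control files and start tracing if already enabled */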
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);