/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
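
/*
 * Dump the recorded max stack trace via printk; called from
 * check_stack() when the end of the current task's stack is
 * found to be corrupted.
 */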
static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 max_stack_trace.nr_entries);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
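
/*
 * Compare the current stack depth against the recorded maximum.
 * On a new max, save the stack trace and work out how many bytes
 * of stack were in use at each entry's call site.
 */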
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[x] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			if (*p == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_dump_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	max_stack_trace.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
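
/*
 * The ftrace callback: bump the per-cpu recursion counter and, if
 * this is the outermost invocation on this CPU, measure the stack
 * usage at this call site.
 */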
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
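
/* "stack_max_size" read handler: report the max stack usage in bytes */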
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
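
/*
 * "stack_max_size" write handler, e.g.
 *	echo 0 > /sys/kernel/debug/tracing/stack_max_size
 * to reset the recorded maximum. The per-cpu trace_active count is
 * bumped so the stack tracer cannot recurse on this CPU while we
 * hold max_stack_lock.
 */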
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
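
/* seq_file iterator helpers for the "stack_trace" file */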
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
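
/*
 * Disable the stack tracer on this CPU (via trace_active) before
 * taking max_stack_lock, so that the tracer firing while we hold
 * the lock cannot deadlock against us in check_stack().
 */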
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
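
/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback when the value actually changes.
 */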
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
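
/*
 * Create the debugfs control files and, if "stacktrace" was given
 * on the kernel command line, start tracing right away.
 */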
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);