/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
5 #include <linux/kallsyms.h>
6 #include <linux/kprobes.h>
7 #include <linux/uaccess.h>
8 #include <linux/utsname.h>
9 #include <linux/hardirq.h>
10 #include <linux/kdebug.h>
11 #include <linux/module.h>
12 #include <linux/ptrace.h>
13 #include <linux/ftrace.h>
14 #include <linux/kexec.h>
15 #include <linux/bug.h>
16 #include <linux/nmi.h>
17 #include <linux/sysfs.h>
19 #include <asm/stacktrace.h>
22 int panic_on_unrecovered_nmi
;
24 unsigned int code_bytes
= 64;
25 int kstack_depth_to_print
= 3 * STACKSLOTS_PER_LINE
;
26 static int die_counter
;
/*
 * Print one stack entry: " [<addr>] symbol+off" (a leading "? " marks an
 * address that was found on the stack but not proven reachable by the
 * frame-pointer walk).  %pS resolves the address to a kernel symbol.
 */
void printk_address(unsigned long address, int reliable)
{
	printk(" [<%p>] %s%pS\n", (void *) address,
			reliable ? "" : "? ", (void *) address);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * When the function-graph tracer is active, return addresses on the stack
 * are replaced by return_to_handler; recover the real return address from
 * the task's ret_stack and feed it to the ops->address callback.
 * *graph counts how many ret_stack entries this trace has consumed so far.
 */
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{
	struct task_struct *task = tinfo->task;
	unsigned long ret_addr;
	int index = task->curr_ret_stack;

	/* Only the tracer's trampoline address needs fixing up. */
	if (addr != (unsigned long)return_to_handler)
		return;

	if (!task->ret_stack || index < *graph)
		return;

	/* Skip the entries already consumed by earlier hits in this walk. */
	index -= *graph;
	ret_addr = task->ret_stack[index].ret;

	ops->address(data, ret_addr, 1);

	(*graph)++;
}
#else
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{ }
#endif
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
72 static inline int valid_stack_ptr(struct thread_info
*tinfo
,
73 void *p
, unsigned int size
, void *end
)
77 if (p
< end
&& p
>= (end
-THREAD_SIZE
))
82 return p
> t
&& p
< t
+ THREAD_SIZE
- size
;
86 print_context_stack(struct thread_info
*tinfo
,
87 unsigned long *stack
, unsigned long bp
,
88 const struct stacktrace_ops
*ops
, void *data
,
89 unsigned long *end
, int *graph
)
91 struct stack_frame
*frame
= (struct stack_frame
*)bp
;
93 while (valid_stack_ptr(tinfo
, stack
, sizeof(*stack
), end
)) {
97 if (__kernel_text_address(addr
)) {
98 if ((unsigned long) stack
== bp
+ sizeof(long)) {
99 ops
->address(data
, addr
, 1);
100 frame
= frame
->next_frame
;
101 bp
= (unsigned long) frame
;
103 ops
->address(data
, addr
, 0);
105 print_ftrace_graph_addr(addr
, data
, ops
, tinfo
, graph
);
111 EXPORT_SYMBOL_GPL(print_context_stack
);
114 print_context_stack_bp(struct thread_info
*tinfo
,
115 unsigned long *stack
, unsigned long bp
,
116 const struct stacktrace_ops
*ops
, void *data
,
117 unsigned long *end
, int *graph
)
119 struct stack_frame
*frame
= (struct stack_frame
*)bp
;
120 unsigned long *ret_addr
= &frame
->return_address
;
122 while (valid_stack_ptr(tinfo
, ret_addr
, sizeof(*ret_addr
), end
)) {
123 unsigned long addr
= *ret_addr
;
125 if (!__kernel_text_address(addr
))
128 ops
->address(data
, addr
, 1);
129 frame
= frame
->next_frame
;
130 ret_addr
= &frame
->return_address
;
131 print_ftrace_graph_addr(addr
, data
, ops
, tinfo
, graph
);
134 return (unsigned long)frame
;
136 EXPORT_SYMBOL_GPL(print_context_stack_bp
);
/*
 * stacktrace_ops callback: print a warning that mentions a symbol.
 * @data is the log-level prefix string passed to dump_trace().
 */
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* NOTE(review): data is a caller-supplied prefix; print it via "%s"
	 * rather than as a format string to avoid format-string hazards. */
	printk("%s", (char *)data);
	print_symbol(msg, symbol);
	printk("\n");
}
/* stacktrace_ops callback: print a plain warning line with the prefix. */
static void print_trace_warning(void *data, char *msg)
{
	printk("%s%s\n", (char *)data, msg);
}
/*
 * stacktrace_ops callback: announce a transition onto a named stack
 * (e.g. an IRQ or exception stack).  Returning 0 lets the walk continue.
 */
static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}
/*
 * Print one address/symbol entry per line.
 * stacktrace_ops callback; @data is the log-level prefix string.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	/* Long traces can take a while; keep the NMI watchdog quiet. */
	touch_nmi_watchdog();
	printk("%s", (char *)data);
	printk_address(addr, reliable);
}
168 static const struct stacktrace_ops print_trace_ops
= {
169 .warning
= print_trace_warning
,
170 .warning_symbol
= print_trace_warning_symbol
,
171 .stack
= print_trace_stack
,
172 .address
= print_trace_address
,
173 .walk_stack
= print_context_stack
,
177 show_trace_log_lvl(struct task_struct
*task
, struct pt_regs
*regs
,
178 unsigned long *stack
, unsigned long bp
, char *log_lvl
)
180 printk("%sCall Trace:\n", log_lvl
);
181 dump_trace(task
, regs
, stack
, bp
, &print_trace_ops
, log_lvl
);
/* Convenience wrapper: show a call trace with no log-level prefix. */
void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	show_trace_log_lvl(task, regs, stack, bp, "");
}
190 void show_stack(struct task_struct
*task
, unsigned long *sp
)
192 show_stack_log_lvl(task
, NULL
, sp
, 0, "");
196 * The architecture-independent dump_stack generator
198 void dump_stack(void)
200 unsigned long bp
= 0;
203 #ifdef CONFIG_FRAME_POINTER
208 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
209 current
->pid
, current
->comm
, print_tainted(),
210 init_utsname()->release
,
211 (int)strcspn(init_utsname()->version
, " "),
212 init_utsname()->version
);
213 show_trace(NULL
, NULL
, &stack
, bp
);
215 EXPORT_SYMBOL(dump_stack
);
217 static arch_spinlock_t die_lock
= __ARCH_SPIN_LOCK_UNLOCKED
;
218 static int die_owner
= -1;
219 static unsigned int die_nest_count
;
221 unsigned __kprobes
long oops_begin(void)
228 /* racy, but better than risking deadlock. */
229 raw_local_irq_save(flags
);
230 cpu
= smp_processor_id();
231 if (!arch_spin_trylock(&die_lock
)) {
232 if (cpu
== die_owner
)
233 /* nested oops. should stop eventually */;
235 arch_spin_lock(&die_lock
);
244 void __kprobes
oops_end(unsigned long flags
, struct pt_regs
*regs
, int signr
)
246 if (regs
&& kexec_should_crash(current
))
251 add_taint(TAINT_DIE
);
254 /* Nest count reaches zero, release the lock. */
255 arch_spin_unlock(&die_lock
);
256 raw_local_irq_restore(flags
);
262 panic("Fatal exception in interrupt");
264 panic("Fatal exception");
268 int __kprobes
__die(const char *str
, struct pt_regs
*regs
, long err
)
274 printk(KERN_EMERG
"%s: %04lx [#%d] ", str
, err
& 0xffff, ++die_counter
);
275 #ifdef CONFIG_PREEMPT
281 #ifdef CONFIG_DEBUG_PAGEALLOC
282 printk("DEBUG_PAGEALLOC");
285 sysfs_printk_last_file();
286 if (notify_die(DIE_OOPS
, str
, regs
, err
,
287 current
->thread
.trap_no
, SIGSEGV
) == NOTIFY_STOP
)
290 show_registers(regs
);
292 if (user_mode_vm(regs
)) {
294 ss
= regs
->ss
& 0xffff;
296 sp
= kernel_stack_pointer(regs
);
299 printk(KERN_EMERG
"EIP: [<%08lx>] ", regs
->ip
);
300 print_symbol("%s", regs
->ip
);
301 printk(" SS:ESP %04x:%08lx\n", ss
, sp
);
303 /* Executive summary in case the oops scrolled away */
304 printk(KERN_ALERT
"RIP ");
305 printk_address(regs
->ip
, 1);
306 printk(" RSP <%016lx>\n", regs
->sp
);
312 * This is gone through when something in the kernel has done something bad
313 * and is about to be terminated:
315 void die(const char *str
, struct pt_regs
*regs
, long err
)
317 unsigned long flags
= oops_begin();
320 if (!user_mode_vm(regs
))
321 report_bug(regs
->ip
, regs
);
323 if (__die(str
, regs
, err
))
325 oops_end(flags
, regs
, sig
);
328 void notrace __kprobes
329 die_nmi(char *str
, struct pt_regs
*regs
, int do_panic
)
333 if (notify_die(DIE_NMIWATCHDOG
, str
, regs
, 0, 2, SIGINT
) == NOTIFY_STOP
)
337 * We are in trouble anyway, lets at least try
338 * to get a message out.
340 flags
= oops_begin();
341 printk(KERN_EMERG
"%s", str
);
342 printk(" on CPU%d, ip %08lx, registers:\n",
343 smp_processor_id(), regs
->ip
);
344 show_registers(regs
);
345 oops_end(flags
, regs
, 0);
346 if (do_panic
|| panic_on_oops
)
347 panic("Non maskable interrupt");
353 static int __init
oops_setup(char *s
)
357 if (!strcmp(s
, "panic"))
361 early_param("oops", oops_setup
);
363 static int __init
kstack_setup(char *s
)
367 kstack_depth_to_print
= simple_strtoul(s
, NULL
, 0);
370 early_param("kstack", kstack_setup
);
372 static int __init
code_bytes_setup(char *s
)
374 code_bytes
= simple_strtoul(s
, NULL
, 0);
375 if (code_bytes
> 8192)
380 __setup("code_bytes=", code_bytes_setup
);