/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/stacktrace.h>
int panic_on_unrecovered_nmi;
unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;
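
/*
 * Print one stack-trace entry: the raw address followed by its %pS symbol
 * resolution; entries the unwinder could not confirm are prefixed with "? ".
 */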
void printk_address(unsigned long address, int reliable)
{
	printk(" [<%p>] %s%pS\n", (void *) address,
			reliable ? "" : "? ", (void *) address);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * When the function-graph tracer is active, return addresses on the stack
 * have been replaced by return_to_handler; look up and report the real
 * return address saved in the task's ret_stack instead.
 */
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{
	struct task_struct *task = tinfo->task;
	unsigned long ret_addr;
	int index = task->curr_ret_stack;

	if (addr != (unsigned long)return_to_handler)
		return;

	if (!task->ret_stack || index < *graph)
		return;

	index -= *graph;
	ret_addr = task->ret_stack[index].ret;

	ops->address(data, ret_addr, 1);

	(*graph)++;
}
#else
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{ }
#endif
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
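
/*
 * valid_stack_ptr() accepts a candidate stack slot only if it lies inside
 * the stack currently being walked: the [end - THREAD_SIZE, end) window of
 * an interrupt/exception stack when 'end' is given, otherwise the task's
 * own thread_info stack.
 */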
static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size, void *end)
{
	void *t = tinfo;

	if (end) {
		if (p < end && p >= (end-THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}
unsigned long
print_context_stack(struct thread_info *tinfo,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data,
		unsigned long *end, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr = *stack;

		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + sizeof(long)) {
				/* confirmed by the frame-pointer chain: reliable */
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				/* kernel text found by scanning: unreliable */
				ops->address(data, addr, 0);
			}
			print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
		}
		stack++;
	}
	return bp;
}
EXPORT_SYMBOL_GPL(print_context_stack);
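
/*
 * Unlike print_context_stack(), which scans every word on the stack,
 * print_context_stack_bp() follows only the frame-pointer chain, so each
 * address it reports is reliable; it stops at the first non-text address.
 */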
unsigned long
print_context_stack_bp(struct thread_info *tinfo,
		       unsigned long *stack, unsigned long bp,
		       const struct stacktrace_ops *ops, void *data,
		       unsigned long *end, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;
	unsigned long *ret_addr = &frame->return_address;

	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
		unsigned long addr = *ret_addr;

		if (!__kernel_text_address(addr))
			break;

		ops->address(data, addr, 1);
		frame = frame->next_frame;
		ret_addr = &frame->return_address;
		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
	}

	return (unsigned long)frame;
}
EXPORT_SYMBOL_GPL(print_context_stack_bp);
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	printk(data);
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}
/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk(data);
	printk_address(addr, reliable);
}
static const struct stacktrace_ops print_trace_ops = {
	.warning		= print_trace_warning,
	.warning_symbol		= print_trace_warning_symbol,
	.stack			= print_trace_stack,
	.address		= print_trace_address,
	.walk_stack		= print_context_stack,
};
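
/*
 * These callbacks are driven by the per-architecture dump_trace()
 * implementation: .stack announces a switch to another stack, .address is
 * invoked for each candidate return address, and .walk_stack selects the
 * frame-walking strategy (full scan vs. frame-pointer chain).
 */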
void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, char *log_lvl)
{
	printk("%sCall Trace:\n", log_lvl);
	dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack)
{
	show_trace_log_lvl(task, regs, stack, "");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	show_stack_log_lvl(task, NULL, sp, "");
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long stack;

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &stack);
}
EXPORT_SYMBOL(dump_stack);
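
/*
 * Oops serialization: a single arch spinlock keeps oopses from different
 * CPUs from interleaving their output, while die_owner and die_nest_count
 * let the owning CPU re-enter (a nested oops) without deadlocking on the
 * lock it already holds.
 */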
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	sysfs_printk_last_file();
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
#ifdef CONFIG_X86_32
	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, 1);
	printk(" RSP <%016lx>\n", regs->sp);
#endif
	return 0;
}
/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (!user_mode_vm(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}
void notrace __kprobes
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags;

	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	flags = oops_begin();
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	oops_end(flags, regs, 0);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	nmi_exit();
	local_irq_enable();
	do_exit(SIGBUS);
}
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);
static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);
static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);