/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

#define STACKSLOTS_PER_LINE 8
#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
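
/*
 * Note (added, illustrative): get_bp() above copies the current frame
 * pointer (%ebp) into its argument, roughly:
 *
 *	unsigned long bp;
 *	get_bp(bp);	[ bp now holds the current %ebp value ]
 *
 * dump_trace() and dump_stack() below use it to seed the frame-pointer
 * walk when CONFIG_FRAME_POINTER is enabled.
 */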

int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static unsigned int code_bytes = 64;
static int die_counter;

void printk_address(unsigned long address, int reliable)
{
	printk(" [<%p>] %s%pS\n", (void *) address,
			reliable ? "" : "? ", (void *) address);
}

static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size, void *end)
{
	void *t = tinfo;
	if (end) {
		if (p < end && p >= (end-THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};
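
/*
 * Note (added): with frame pointers, each function prologue pushes the
 * caller's %ebp right below the return address, so the memory at a frame
 * pointer reads as a struct stack_frame: *(bp) is the previous frame's bp
 * (next_frame) and *(bp + 4) is the return address.  print_context_stack()
 * below uses the test "stack == bp + sizeof(long)" to recognize a
 * return-address slot and mark that entry as reliable.
 */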
static inline unsigned long
print_context_stack(struct thread_info *tinfo,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data,
		unsigned long *end)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr;

		addr = *stack;
		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + sizeof(long)) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, bp == 0);
			}
		}
		stack++;
	}
	return bp;
}
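
/*
 * Note (added): dump_trace() below walks one THREAD_SIZE-aligned stack at
 * a time; masking the stack pointer with ~(THREAD_SIZE - 1) yields the
 * owning thread_info.  On 32-bit, interrupt stacks are chained through
 * thread_info->previous_esp, so the loop hops from the IRQ stack back to
 * the interrupted stack, emitting an "IRQ" marker via ops->stack() at each
 * transition.
 */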
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	if (!task)
		task = current;

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (task == current) {
			/* Grab bp right from our regs */
			get_bp(bp);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) task->thread.sp;
		}
	}
#endif

	for (;;) {
		struct thread_info *context;

		context = (struct thread_info *)
			((unsigned long)stack & (~(THREAD_SIZE - 1)));
		bp = print_context_stack(context, stack, bp, ops, data, NULL);

		stack = (unsigned long *)context->previous_esp;
		if (!stack)
			break;
		if (ops->stack(data, "IRQ") < 0)
			break;
		touch_nmi_watchdog();
	}
}
EXPORT_SYMBOL(dump_trace);
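
/*
 * Note (added): the callbacks below implement stacktrace_ops for plain
 * printk output.  dump_trace() hands every discovered text address to
 * print_trace_address() together with a "reliable" flag; unreliable
 * entries end up prefixed with "? " by printk_address().
 */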
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	printk(data);
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk(data);
	printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};

static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp, char *log_lvl)
{
	printk("%sCall Trace:\n", log_lvl);
	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	show_trace_log_lvl(task, regs, stack, bp, "");
}

static void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *stack;
	int i;

	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
			printk("\n%s", log_lvl);
		printk(" %08lx", *stack++);
		touch_nmi_watchdog();
	}
	printk("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	show_stack_log_lvl(task, NULL, sp, 0, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long bp = 0;
	unsigned long stack;

#ifdef CONFIG_FRAME_POINTER
	if (!bp)
		get_bp(bp);
#endif

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &stack, bp);
}
EXPORT_SYMBOL(dump_stack);
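
/*
 * Note (added): for a fault in kernel mode, show_registers() below dumps
 * the raw stack words and up to code_bytes bytes of code around regs->ip;
 * the byte at the faulting EIP is printed as "<xx>" so it stands out in
 * the "Code:" line of an oops.
 */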
void show_registers(struct pt_regs *regs)
{
	int i;

	print_modules();
	__show_regs(regs, 0);

	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
		TASK_COMM_LEN, current->comm, task_pid_nr(current),
		current_thread_info(), current, task_thread_info(current));
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault.
	 */
	if (!user_mode_vm(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_EMERG "Stack:\n");
		show_stack_log_lvl(NULL, regs, &regs->sp,
				0, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
			    probe_kernel_address(ip, c)) {
				printk(" Bad EIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
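
/*
 * Note (added): 0x0b0f is the two-byte ud2 instruction (opcode 0f 0b) read
 * as a little-endian 16-bit value.  is_valid_bugaddr() below verifies that
 * a reported BUG() address really points at a ud2 trap before the generic
 * report_bug() code treats it as one.
 */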
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (ip < PAGE_OFFSET)
		return 0;
	if (probe_kernel_address((unsigned short *)ip, ud2))
		return 0;

	return ud2 == 0x0b0f;
}

static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
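
/*
 * Note (added): die_lock serializes oops output between CPUs.  die_owner
 * records which CPU holds it so that a nested oops on the same CPU skips
 * the lock in oops_begin() instead of deadlocking, and die_nest_count lets
 * die() suppress output once the recursion gets out of hand.
 */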
unsigned __kprobes long oops_begin(void)
{
	unsigned long flags;

	oops_enter();

	if (die_owner != raw_smp_processor_id()) {
		console_verbose();
		raw_local_irq_save(flags);
		__raw_spin_lock(&die_lock);
		die_owner = smp_processor_id();
		die_nest_count = 0;
		bust_spinlocks(1);
	} else {
		raw_local_irq_save(flags);
	}
	die_nest_count++;
	return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	if (!regs)
		return;

	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
	do_exit(signr);
}
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	unsigned short ss;
	unsigned long sp;

	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	sp = (unsigned long) (&regs->sp);
	savesegment(ss, ss);
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
	return 0;
}

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();

	if (die_nest_count < 3) {
		report_bug(regs->ip, regs);

		if (__die(str, regs, err))
			regs = NULL;
	} else {
		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
	}

	oops_end(flags, regs, SIGSEGV);
}

static DEFINE_SPINLOCK(nmi_print_lock);

void notrace __kprobes
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out:
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	if (do_panic)
		panic("Non maskable interrupt");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);

	/*
	 * If we are in the kernel we are probably nested up pretty badly
	 * and might as well get out now while we still can:
	 */
	if (!user_mode_vm(regs)) {
		current->thread.trap_no = 2;
		crash_kexec(regs);
	}

	do_exit(SIGSEGV);
}
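
/*
 * Note (added): the boot parameters handled below are "oops=panic" (panic
 * instead of just killing the offending task), "kstack=N" (how many stack
 * words show_stack_log_lvl() prints) and "code_bytes=N" (how much code
 * show_registers() dumps around EIP, clamped to 8192 bytes).
 */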
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);