/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/unwind.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/unwind.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

#include <mach_traps.h>
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 12;
static unsigned int code_bytes = 64;
static int ignore_nmis;
static int die_counter;
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	dec_preempt_count();
}
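/*
 * The preempt_conditional_*() pair is used by handlers that run on an IST
 * exception stack (see do_debug, do_int3 and do_stack_segment below):
 * interrupts are re-enabled only if the interrupted context had them
 * enabled (EFLAGS.IF saved in regs->flags), and the preempt count is
 * raised so the handler cannot be scheduled away while it is still
 * running on the per-CPU exception stack.
 */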
void printk_address(unsigned long address, int reliable)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[KSYM_NAME_LEN];
	char reliab[4] = "";

	symname = kallsyms_lookup(address, &symsize, &offset,
				  &modname, namebuf);
	if (!symname) {
		printk(" [<%016lx>]\n", address);
		return;
	}
	if (!reliable)
		strcpy(reliab, "? ");
	if (!modname)
		modname = delim = "";
	printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
		address, reliab, delim, modname, delim, symname, offset, symsize);
#else
	printk(" [<%016lx>]\n", address);
#endif
}
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ...
		 N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Dont look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size, void *end)
{
	void *t = tinfo;

	if (end) {
		if (p < end && p >= (end-THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}
/* The form of the top of the frame on the stack */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};
static inline unsigned long
print_context_stack(struct thread_info *tinfo,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data,
		unsigned long *end)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr = *stack;

		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + 8) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, bp == 0);
			}
		}
		stack++;
	}
	return bp;
}
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!task)
		task = current;
	tinfo = task_thread_info(task);

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (task == current) {
			/* Grab bp right from our regs */
			asm("movq %%rbp, %0" : "=r" (bp) :);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) task->thread.sp;
		}
	}
#endif

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	for (;;) {
		char *id;
		unsigned long *estack_end;

		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);
		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;

			bp = print_context_stack(tinfo, stack, bp, ops,
							data, estack_end);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = print_context_stack(tinfo, stack, bp,
						ops, data, irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk(" <%s> ", name);
	return 0;
}

static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};
void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	printk("\nCall Trace:\n");
	dump_trace(task, regs, stack, bp, &print_trace_ops, NULL);
	printk("\n");
}
static void
_show_stack(struct task_struct *task, struct pt_regs *regs,
		unsigned long *sp, unsigned long bp)
{
	unsigned long *stack;
	int i;
	const int cpu = smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	printk("\n");
	show_trace(task, regs, sp, bp);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
	_show_stack(task, NULL, sp, 0);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long stack;
	unsigned long bp = 0;

#ifdef CONFIG_FRAME_POINTER
	if (!bp)
		asm("movq %%rbp, %0" : "=r" (bp):);
#endif

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &stack, bp);
}
EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	unsigned long sp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
	u8 *ip;
	unsigned int code_prologue = code_bytes * 43 / 64;
	unsigned int code_len = code_bytes;

	sp = regs->sp;
	ip = (u8 *) regs->ip - code_prologue;
	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		unsigned char c;

		printk("Stack: ");
		_show_stack(NULL, regs, (unsigned long *)sp, regs->bp);
		printk("\n");

		printk(KERN_EMERG "Code: ");
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at RIP */
			ip = (u8 *) regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				printk(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	if (!regs) {
		oops_exit();
		return;
	}
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
	do_exit(signr);
}
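/*
 * die_lock serializes oops output between CPUs. oops_begin() only spins
 * on it when the lock is held by another CPU; if the owning CPU oopses
 * again (a nested oops), it proceeds without re-taking the lock, and
 * die_nest_count makes sure only the outermost oops_end() releases it.
 */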
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
	add_taint(TAINT_DIE);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, 1);
	printk(" RSP <%016lx>\n", regs->sp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	return 0;
}
void die(const char * str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();

	if (!user_mode(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		regs = NULL;
	oops_end(flags, regs, SIGSEGV);
}
notrace __kprobes void
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags;

	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	flags = oops_begin();
	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out.
	 */
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	oops_end(flags, NULL, SIGBUS);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGBUS);
}
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!user_mode(regs))
		goto kernel_trap;

	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults. See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid, str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	trace_hardirqs_fixup(); \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}
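/*
 * For illustration, DO_ERROR(4, SIGSEGV, "overflow", overflow) expands
 * roughly to:
 *
 *	asmlinkage void do_overflow(struct pt_regs *regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *				4, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *	}
 *
 * DO_ERROR_INFO additionally fills in a siginfo_t (si_code, si_addr) so
 * that do_trap() can queue a precise signal for the faulting instruction.
 */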
DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/* This is always a kernel trap and never fixable (and thus must
	   never return). */
	for (;;)
		die(str, regs, error_code);
}
asmlinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}
static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}
asmlinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	add_pda(__nmi_count, 1);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}
void restart_nmi(void)
{
	ignore_nmis--;
	acpi_nmi_enable();
}
/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
	trace_hardirqs_fixup();

	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/* Exception from kernel and interrupts are enabled. Move to
	   kernel process stack. */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs * regs,
				   unsigned long error_code)
{
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	trace_hardirqs_fixup();

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
}
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	if (fixup_exception(regs))
		return 1;

	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *ip = (void __user *)(regs->ip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000: /* No unmasked exception */
	default: /* Multiple exceptions */
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *ip = (void __user *)(regs->ip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if (!user_mode(regs) &&
			kernel_math_error(regs, "kernel simd math error", 19))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
{
}
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;

	if (!used_math()) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(me)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */
	restore_fpu_checking(&me->thread.xstate->fxsave);
	task_thread_info(me)->status |= TS_USEDFPU;
	me->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
void __init trap_init(void)
{
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
	set_system_gate(4, &overflow); /* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
	/*
	 * initialize the per thread extended state:
	 */
	init_thread_xstate();
	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();
}
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);