/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>

#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/module.h>

#include "mach_traps.h"
asmlinkage int system_call(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
		{ 0, 0 }, { 0, 0 } };

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
struct notifier_block *i386die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&i386die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}
EXPORT_SYMBOL(register_die_notifier);
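
/*
 * Illustrative sketch only (not part of this file): a client would hook
 * the die chain roughly like this; the handler name is hypothetical.
 */
#if 0
static int my_die_handler(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	/* val is a DIE_* event; data points to a struct die_args */
	return NOTIFY_DONE;	/* let other handlers and die() proceed */
}

static struct notifier_block my_die_nb = { .notifier_call = my_die_handler };

/* in some init path: register_die_notifier(&my_die_nb); */
#endif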
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
	return	p > (void *)tinfo &&
		p < (void *)tinfo + THREAD_SIZE - 3;
}
/*
 * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line.
 */
static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl,
					int printed)
{
	if (!printed)
		printk(log_lvl);

#if CONFIG_STACK_BACKTRACE_COLS == 1
	printk(" [<%08lx>] ", addr);
#else
	printk(" <%08lx> ", addr);
#endif
	print_symbol("%s", addr);

	printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;

	if (printed)
		printk("  ");
	else
		printk("\n");

	return printed;
}
static inline unsigned long print_context_stack(struct thread_info *tinfo,
				unsigned long *stack, unsigned long ebp,
				char *log_lvl)
{
	unsigned long addr;
	int printed = 0; /* nr of entries already printed on current line */

#ifdef	CONFIG_FRAME_POINTER
	while (valid_stack_ptr(tinfo, (void *)ebp)) {
		addr = *(unsigned long *)(ebp + 4);
		printed = print_addr_and_symbol(addr, log_lvl, printed);
		ebp = *(unsigned long *)ebp;
	}
#else
	while (valid_stack_ptr(tinfo, stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr))
			printed = print_addr_and_symbol(addr, log_lvl, printed);
	}
#endif
	if (printed)
		printk("\n");

	return ebp;
}
static void show_trace_log_lvl(struct task_struct *task,
			       unsigned long *stack, char *log_lvl)
{
	unsigned long ebp;

	if (!task)
		task = current;

	if (task == current) {
		/* Grab ebp right from our regs */
		asm ("movl %%ebp, %0" : "=r" (ebp) : );
	} else {
		/* ebp is the last reg pushed by switch_to */
		ebp = *(unsigned long *) task->thread.esp;
	}

	while (1) {
		struct thread_info *context;
		context = (struct thread_info *)
			((unsigned long)stack & (~(THREAD_SIZE - 1)));
		ebp = print_context_stack(context, stack, ebp, log_lvl);
		stack = (unsigned long*)context->previous_esp;
		if (!stack)
			break;
		printk("%s =======================\n", log_lvl);
	}
}
void show_trace(struct task_struct *task, unsigned long * stack)
{
	show_trace_log_lvl(task, stack, "");
}
static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
			       char *log_lvl)
{
	unsigned long *stack;
	int i;

	if (esp == NULL) {
		if (task)
			esp = (unsigned long*)task->thread.esp;
		else
			esp = (unsigned long *)&esp;
	}

	stack = esp;
	for(i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % 8) == 0)) {
			printk("\n");
			printk("%s       ", log_lvl);
		}
		printk("%08lx ", *stack++);
	}
	printk("\n");
	printk("%sCall Trace:\n", log_lvl);
	show_trace_log_lvl(task, esp, log_lvl);
}
void show_stack(struct task_struct *task, unsigned long *esp)
{
	show_stack_log_lvl(task, esp, "");
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long stack;

	show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss;

	esp = (unsigned long) (&regs->esp);
	savesegment(ss, ss);
	if (user_mode(regs)) {
		in_kernel = 0;
		esp = regs->esp;
		ss = regs->xss & 0xffff;
	}
	print_modules();
	printk(KERN_EMERG "CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\n"
			"EFLAGS: %08lx   (%s %.*s) \n",
		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
		print_tainted(), regs->eflags, system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
	printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, ss);
	printk(KERN_EMERG "Process %s (pid: %d, threadinfo=%p task=%p)",
		current->comm, current->pid, current_thread_info(), current);
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		u8 __user *eip;

		printk("\n" KERN_EMERG "Stack: ");
		show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

		eip = (u8 __user *)regs->eip - 43;
		for (i = 0; i < 64; i++, eip++) {
			unsigned char c;

			if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
				printk(" Bad EIP value.");
				break;
			}
			if (eip == (u8 __user *)regs->eip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
static void handle_BUG(struct pt_regs *regs)
{
	unsigned short ud2;
	unsigned short line;
	char *file;
	char c;
	unsigned long eip;

	eip = regs->eip;

	if (eip < PAGE_OFFSET)
		goto no_bug;
	if (__get_user(ud2, (unsigned short __user *)eip))
		goto no_bug;
	if (ud2 != 0x0b0f)
		goto no_bug;
	if (__get_user(line, (unsigned short __user *)(eip + 2)))
		goto bug;
	if (__get_user(file, (char * __user *)(eip + 4)) ||
		(unsigned long)file < PAGE_OFFSET || __get_user(c, file))
		file = "<bad filename>";

	printk(KERN_EMERG "------------[ cut here ]------------\n");
	printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);

no_bug:
	return;

	/* Here we know it was a BUG but file-n-line is unavailable */
bug:
	printk(KERN_EMERG "Kernel BUG\n");
}
/* This is gone through when something in the kernel
 * has done something bad and is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock =			SPIN_LOCK_UNLOCKED,
		.lock_owner =		-1,
		.lock_owner_depth =	0
	};
	static int die_counter;
	unsigned long flags;

	if (die.lock_owner != raw_smp_processor_id()) {
		console_verbose();
		spin_lock_irqsave(&die.lock, flags);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
	}
	else
		local_save_flags(flags);

	if (++die.lock_owner_depth < 3) {
		int nl = 0;
		handle_BUG(regs);
		printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
		printk(KERN_EMERG "PREEMPT ");
		nl = 1;
#endif
#ifdef CONFIG_SMP
		if (!nl)
			printk(KERN_EMERG);
		printk("SMP ");
		nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
		if (!nl)
			printk(KERN_EMERG);
		printk("DEBUG_PAGEALLOC");
		nl = 1;
#endif
		if (nl)
			printk("\n");
		notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
		show_registers(regs);
	} else
		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

	bust_spinlocks(0);
	die.lock_owner = -1;
	spin_unlock_irqrestore(&die.lock, flags);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}
	do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode_vm(regs))
		die(str, regs, err);
}
static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
			      struct pt_regs * regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (regs->eflags & VM_MASK) {
		if (vm86)
			goto vm86_trap;
		goto trap_signal;
	}

	if (!user_mode(regs))
		goto kernel_trap;

	trap_signal: {
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	kernel_trap: {
		if (!fixup_exception(regs))
			die(str, regs, error_code);
		return;
	}

	vm86_trap: {
		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
		if (ret) goto trap_signal;
		return;
	}
}
#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
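
/*
 * For reference, a worked expansion: DO_ERROR(10, SIGSEGV, "invalid TSS",
 * invalid_TSS) below becomes (whitespace aside):
 *
 *	fastcall void do_invalid_TSS(struct pt_regs * regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *					10, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *	}
 */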
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
fastcall void __kprobes do_general_protection(struct pt_regs * regs,
					      long error_code)
{
	int cpu = get_cpu();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;

	/*
	 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
	 * invalid offset set (the LAZY one) and the faulting thread has
	 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
	 * and we set the offset field correctly. Then we let the CPU
	 * restart the faulting instruction.
	 */
	if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
	    thread->io_bitmap_ptr) {
		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
		       thread->io_bitmap_max);
		/*
		 * If the previously set map was extending to higher ports
		 * than the current one, pad extra space with 0xff (no access).
		 */
		if (thread->io_bitmap_max < tss->io_bitmap_max)
			memset((char *) tss->io_bitmap +
				thread->io_bitmap_max, 0xff,
				tss->io_bitmap_max - thread->io_bitmap_max);
		tss->io_bitmap_max = thread->io_bitmap_max;
		tss->io_bitmap_base = IO_BITMAP_OFFSET;
		tss->io_bitmap_owner = thread;
		put_cpu();
		return;
	}
	put_cpu();

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;

	if (regs->eflags & VM_MASK)
		goto gp_in_vm86;

	if (!user_mode(regs))
		goto gp_in_kernel;

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;
	force_sig(SIGSEGV, current);
	return;

gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	if (!fixup_exception(regs)) {
		if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
			"to continue\n");
	printk(KERN_EMERG "You probably have a hardware problem with your RAM "
			"chips\n");

	/* Clear and disable the memory parity error line. */
	clear_mem_error(reason);
}
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
	unsigned long i;

	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i) udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
	/* Might actually be able to figure out what the guilty party
	 * is. */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}
static DEFINE_SPINLOCK(nmi_print_lock);

void die_nmi (struct pt_regs *regs, const char *msg)
{
	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 0, SIGINT) ==
	    NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", msg);
	printk(" on CPU%d, eip %08lx, registers:\n",
		smp_processor_id(), regs->eip);
	show_registers(regs);
	printk(KERN_EMERG "console shuts up ...\n");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);

	/* If we are in kernel we are probably nested up pretty bad
	 * and might as well get out now while we still can.
	 */
	if (!user_mode(regs)) {
		current->thread.trap_no = 2;
		crash_kexec(regs);
	}

	do_exit(SIGSEGV);
}
static void default_do_nmi(struct pt_regs * regs)
{
	unsigned char reason = 0;

	/* Only the BSP gets external NMIs from the system. */
	if (!smp_processor_id())
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
							== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog) {
			nmi_watchdog_tick(regs);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
		return;
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	reassert_nmi();
}
static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

fastcall void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu;

	nmi_enter();

	cpu = smp_processor_id();

	++nmi_count(cpu);

	if (!rcu_dereference(nmi_callback)(regs, cpu))
		default_do_nmi(regs);

	nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
	rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);

void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);
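
/*
 * Illustrative sketch only (not part of this file): the lapic NMI
 * watchdog is the in-tree user of this hook; a hypothetical client
 * would look like this.
 */
#if 0
static int my_nmi_callback(struct pt_regs * regs, int cpu)
{
	/* return 1 to consume the NMI, 0 to fall through to default_do_nmi */
	return 0;
}

/* set_nmi_callback(my_nmi_callback); ... unset_nmi_callback(); */
#endif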
#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
	/* This is an interrupt gate, because kprobes wants interrupts
	   disabled.  Normal trap handlers don't. */
	restore_interrupts(regs);
	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
	unsigned int condition;
	struct task_struct *tsk = current;

	get_debugreg(condition, 6);

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
					SIGTRAP) == NOTIFY_STOP)
		return;
	/* It's safe to allow irq's after DR6 has been saved */
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg[7])
			goto clear_dr7;
	}

	if (regs->eflags & VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg[6] = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		/*
		 * We already checked v86 mode above, so we can
		 * check for kernel mode by just checking the CPL
		 * of CS.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code);

	/* Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
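	/*
	 * Worked example: with the power-up cwd of 0x037f every exception
	 * is masked and (swd & ~cwd & 0x3f) is always 0.  If a program
	 * unmasks invalid-op (clears cwd bit 0, e.g. cwd = 0x037e) and then
	 * raises one (swd bit 0 set), the switch below sees 0x001 and picks
	 * FPE_FLTINV.
	 */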
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
		case 0x000: /* No unmasked exception */
			return;
		default:    /* Multiple exceptions */
			break;
		case 0x001: /* Invalid Op */
			/*
			 * swd & 0x240 == 0x040: Stack Underflow
			 * swd & 0x240 == 0x240: Stack Overflow
			 * User must clear the SF bit (0x40) if set
			 */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
	ignore_fpu_irq = 1;
	math_error((void __user *)regs->eip);
}
static void simd_math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
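	/*
	 * Worked example: the power-up mxcsr is 0x1f80 (all six exceptions
	 * masked), so the expression below is always 0.  With divide-by-zero
	 * unmasked and flagged (mask bit 9 clear, ZE flag bit 2 set, i.e.
	 * mxcsr = 0x1d84), it yields 0x004 and we pick FPE_FLTDIV.
	 */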
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
					  long error_code)
{
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->eip);
	} else {
		/*
		 * Handle strange cache flush from user space exception
		 * in all other cases.  This is undocumented behaviour.
		 */
		if (regs->eflags & VM_MASK) {
			handle_vm86_fault((struct kernel_vm86_regs *)regs,
					  error_code);
			return;
		}
		current->thread.trap_no = 19;
		current->thread.error_code = error_code;
		die_if_kernel("cache flush denied", regs, error_code);
		force_sig(SIGSEGV, current);
	}
}
fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
					  long error_code)
{
#if 0
	/* No need to warn about this any longer. */
	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
fastcall void setup_x86_bogus_stack(unsigned char * stk)
{
	unsigned long *switch16_ptr, *switch32_ptr;
	struct pt_regs *regs;
	unsigned long stack_top, stack_bot;
	unsigned short iret_frame16_off;
	int cpu = smp_processor_id();
	/* reserve the space on 32bit stack for the magic switch16 pointer */
	memmove(stk, stk + 8, sizeof(struct pt_regs));
	switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
	regs = (struct pt_regs *)stk;
	/* now the switch32 on 16bit stack */
	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
	stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
	switch32_ptr = (unsigned long *)(stack_top - 8);
	iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
	/* copy iret frame on 16bit stack */
	memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
	/* fill in the switch pointers */
	switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
	switch16_ptr[1] = __ESPFIX_SS;
	switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
		8 - CPU_16BIT_STACK_SIZE;
	switch32_ptr[1] = __KERNEL_DS;
}
fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
{
	unsigned long *switch32_ptr;
	unsigned char *stack16, *stack32;
	unsigned long stack_top, stack_bot;
	int len;
	int cpu = smp_processor_id();
	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
	stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
	switch32_ptr = (unsigned long *)(stack_top - 8);
	/* copy the data from 16bit stack to 32bit stack */
	len = CPU_16BIT_STACK_SIZE - 8 - sp;
	stack16 = (unsigned char *)(stack_bot + sp);
	stack32 = (unsigned char *)
		(switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
	memcpy(stack32, stack16, len);
	return stack32;
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	clts();		/* Allow maths ops (or we recurse) */
	if (!tsk_used_math(tsk))
		init_fpu(tsk);
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
}
#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
	printk(KERN_EMERG "killing %s.\n",current->comm);
	force_sig(SIGFPE,current);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif
#define _set_gate(gate_addr,type,dpl,addr,seg) \
do { \
  int __d0, __d1; \
  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
	"movw %4,%%dx\n\t" \
	"movl %%eax,%0\n\t" \
	"movl %%edx,%1" \
	:"=m" (*((long *) (gate_addr))), \
	 "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
	:"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
	 "3" ((char *) (addr)),"2" ((seg) << 16)); \
} while (0)
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
}
/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
	_set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
}
void __init trap_init(void)
{
#ifdef CONFIG_EISA
	void __iomem *p = ioremap(0x0FFFD9, 4);
	if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
		EISA_bus = 1;
	}
	iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	init_apic_mappings();
#endif

	set_trap_gate(0,&divide_error);
	set_intr_gate(1,&debug);
	set_intr_gate(2,&nmi);
	set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
	set_system_gate(4,&overflow);
	set_trap_gate(5,&bounds);
	set_trap_gate(6,&invalid_op);
	set_trap_gate(7,&device_not_available);
	set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
	set_trap_gate(9,&coprocessor_segment_overrun);
	set_trap_gate(10,&invalid_TSS);
	set_trap_gate(11,&segment_not_present);
	set_trap_gate(12,&stack_segment);
	set_trap_gate(13,&general_protection);
	set_intr_gate(14,&page_fault);
	set_trap_gate(15,&spurious_interrupt_bug);
	set_trap_gate(16,&coprocessor_error);
	set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
	set_trap_gate(18,&machine_check);
#endif
	set_trap_gate(19,&simd_coprocessor_error);
	if (cpu_has_fxsr) {
		/*
		 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
		 * Generates a compile-time "error: zero width for bit-field"
		 * if the alignment is wrong.
		 */
		struct fxsrAlignAssert {
			int _:!(offsetof(struct task_struct,
					thread.i387.fxsave) & 15);
		};
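		/*
		 * Aside: the zero-width bit-field above is a generic
		 * compile-time assertion trick.  A stand-alone sketch:
		 *
		 *	struct must_hold { int _:!!(condition); };
		 *
		 * The field width is 1 when the condition holds and 0 when
		 * it does not, and C forbids a zero-width named bit-field,
		 * so a false condition breaks the build.
		 */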
		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
				"support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_gate(SYSCALL_VECTOR,&system_call);

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();

	trap_init_hook();
}
static int __init kstack_setup(char *s)
{
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("kstack=", kstack_setup);