/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#include <asm/processor.h>
#endif

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>

#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/arch_hooks.h>

#include <linux/irq.h>
#include <linux/module.h>

asmlinkage int system_call(void);
asmlinkage void lcall7(void);
asmlinkage void lcall27(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
                { 0, 0 }, { 0, 0 } };

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

static int kstack_depth_to_print = 24;

/*
 * If the address is either in the .text section of the
 * kernel, or in the vmalloc'ed module regions, it *may*
 * be the address of a calling routine
 */

#ifdef CONFIG_MODULES

/* FIXME: Accessed without a lock --RR */
extern struct list_head modules;

static inline int kernel_text_address(unsigned long addr)
{
        int retval = 0;
        struct module *mod;

        if (addr >= (unsigned long) &_stext &&
            addr <= (unsigned long) &_etext)
                return 1;

        list_for_each_entry(mod, &modules, list) {
                /* mod_bound tests for addr being inside the vmalloc'ed
                 * module area. Of course it'd be better to test only
                 * for the .text subset... */
                if (mod_bound((void *)addr, 0, mod)) {
                        retval = 1;
                        break;
                }
        }

        return retval;
}

#else

static inline int kernel_text_address(unsigned long addr)
{
        return (addr >= (unsigned long) &_stext &&
                addr <= (unsigned long) &_etext);
}

#endif

void show_trace(unsigned long * stack)
{
        int i;
        unsigned long addr;

        if (!stack)
                stack = (unsigned long*)&stack;

        printk("Call Trace:");
#if CONFIG_KALLSYMS
        printk("\n");
#endif
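        /*
         * Scan the current kernel stack word by word until we cross a
         * THREAD_SIZE boundary, printing every value that points into
         * kernel text. Stale data on the stack means some hits are
         * only *possible* return addresses, as noted above.
         */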
        i = 1;
        while (((long) stack & (THREAD_SIZE-1)) != 0) {
                addr = *stack++;
                if (kernel_text_address(addr)) {
                        printk(" [<%08lx>] ", addr);
                        print_symbol("%s\n", addr);
                }
        }
        printk("\n");
}

void show_trace_task(struct task_struct *tsk)
{
        unsigned long esp = tsk->thread.esp;

        /* User space on another CPU? */
        if ((esp ^ (unsigned long)tsk->thread_info) & (PAGE_MASK<<1))
                return;
        show_trace((unsigned long *)esp);
}

void show_stack(unsigned long * esp)
{
        unsigned long *stack;
        int i;

        // debugging aid: "show_stack(NULL);" prints the
        // back trace for this cpu.

        if (esp == NULL)
                esp = (unsigned long *)&esp;

        stack = esp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((long) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n       ");
                printk("%08lx ", *stack++);
        }
        printk("\n");
        show_trace(esp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(&stack);
}

void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        ss = __KERNEL_DS;
        if (regs->xcs & 3) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s\nEFLAGS: %08lx\n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags);
        print_symbol("EIP is at %s\n", regs->eip);
        printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk("ds: %04x   es: %04x   ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk("Process %s (pid: %d, threadinfo=%p task=%p)",
                current->comm, current->pid, current_thread_info(), current);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {

                printk("\nStack: ");
                show_stack((unsigned long *)esp);

                printk("Code: ");
                if (regs->eip < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;
                        if (__get_user(c, &((unsigned char *)regs->eip)[i])) {
bad:
                                printk(" Bad EIP value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}

static void handle_BUG(struct pt_regs *regs)
{
        unsigned short ud2;
        unsigned short line;
        char *file;
        char c;
        unsigned long eip;

        if (regs->xcs & 3)
                goto no_bug;            /* Not in kernel */

        eip = regs->eip;

        if (eip < PAGE_OFFSET)
                goto no_bug;
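        /*
         * BUG() on i386 emits a ud2 instruction (bytes 0x0f 0x0b, read
         * below as the little-endian word 0x0b0f), followed by the
         * 16-bit source line number and a 32-bit pointer to the file
         * name; the three fetches below decode exactly that layout.
         */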
        if (__get_user(ud2, (unsigned short *)eip))
                goto no_bug;
        if (ud2 != 0x0b0f)
                goto no_bug;
        if (__get_user(line, (unsigned short *)(eip + 2)))
                goto bug;
        if (__get_user(file, (char **)(eip + 4)) ||
            (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
                file = "<bad filename>";

        printk("------------[ cut here ]------------\n");
        printk("kernel BUG at %s:%d!\n", file, line);

no_bug:
        return;

        /* Here we know it was a BUG but file-n-line is unavailable */
bug:
        printk("Kernel BUG\n");
}

spinlock_t die_lock = SPIN_LOCK_UNLOCKED;

void die(const char * str, struct pt_regs * regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        handle_BUG(regs);
        printk("%s: %04lx\n", str, err & 0xffff);
        show_registers(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
                die(str, regs, err);
}

static inline unsigned long get_cr2(void)
{
        unsigned long address;

        /* get the faulting linear address from %cr2 */
        __asm__("movl %%cr2,%0" : "=r" (address));
        return address;
}

static inline void do_trap(int trapnr, int signr, char *str, int vm86,
                           struct pt_regs * regs, long error_code, siginfo_t *info)
{
        if (vm86 && regs->eflags & VM_MASK)
                goto vm86_trap;

        if (!(regs->xcs & 3))
                goto kernel_trap;

        trap_signal: {
                struct task_struct *tsk = current;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                unsigned long fixup;
#ifdef CONFIG_PNPBIOS
                if (unlikely((regs->xcs | 8) == 0x88)) {        /* 0x80 or 0x88 */
                        extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
                        extern u32 pnp_bios_is_utter_crap;
                        pnp_bios_is_utter_crap = 1;
                        printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
                        __asm__ volatile(
                                "movl %0, %%esp\n\t"
                                "jmp *%1\n\t"
                                : "=a" (pnp_bios_fault_esp), "=b" (pnp_bios_fault_eip));
                        panic("do_trap: can't hit this");
                }
#endif
                fixup = search_exception_table(regs->eip);
                if (fixup)
                        regs->eip = fixup;
                else
                        die(str, regs, error_code);
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                           error_code, trapnr);
                if (ret)
                        goto trap_signal;
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void *)siaddr; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void *)siaddr; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}

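/*
 * Each DO_* line below expands, via the macros above, into an
 * asmlinkage handler named do_<name>; e.g. DO_ERROR(10, SIGSEGV,
 * "invalid TSS", invalid_TSS) generates do_invalid_TSS(), which
 * funnels into do_trap() with the given trap number and signal.
 */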
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, get_cr2())

asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!(regs->xcs & 3))
                goto gp_in_kernel;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        {
                unsigned long fixup;
                fixup = search_exception_table(regs->eip);
                if (fixup) {
                        regs->eip = fixup;
                        return;
                }
                die("general protection fault", regs, error_code);
        }
}

static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips\n");

        /* Clear and disable the memory parity error line. */
        reason = (reason & 0xf) | 4;
        outb(reason, 0x61);
}

static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
        unsigned long i;

        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        i = 2000;
        while (--i)
                udelay(1000);
        reason &= ~8;
        outb(reason, 0x61);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                reason, smp_processor_id());
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}

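/*
 * Port 0x61 is the ISA system control port B: bit 7 reports a memory
 * (RAM) parity error and bit 6 an I/O channel check, which is why the
 * handler below distinguishes NMI sources by testing reason against
 * 0xc0, 0x80 and 0x40.
 */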
static void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = inb(0x61);

        if (!(reason & 0xc0)) {
#if CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog) {
                        nmi_watchdog_tick(regs);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);

        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
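        /*
         * Bit 7 of CMOS address port 0x70 gates NMI delivery: writing
         * 0x8f masks NMIs (while selecting CMOS register 0x0f), and
         * writing 0x0f unmasks them again, re-arming the line.
         */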
        outb(0x8f, 0x70);
        inb(0x71);              /* dummy */
        outb(0x0f, 0x70);
        inb(0x71);              /* dummy */
}

static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();
        ++nmi_count(cpu);

        if (!nmi_callback(regs, cpu))
                default_do_nmi(regs);

        nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
        nmi_callback = callback;
}

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;
        siginfo_t info;

        __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /* Mask out spurious TF errors due to lazy TF clearing */
        if (condition & DR_STEP) {
                /*
                 * The TF error should be masked out only if the current
                 * process is not traced and if the TRAP flag has been set
                 * previously by a tracing process (condition detected by
                 * the PT_DTRACE flag); remember that the i386 TRAP flag
                 * can be modified by the process itself in user mode,
                 * allowing programs to debug themselves without the ptrace()
                 * interface.
                 */
                if ((regs->xcs & 3) == 0)
                        goto clear_TF;
                if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
                        goto clear_TF;
        }

        /* Ok, finally something we can handle */
        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;

        /* If this is a kernel mode trap, save the user PC on entry to
         * the kernel, that's what the debugger can make sense of.
         */
        info.si_addr = ((regs->xcs & 3) == 0) ? (void *)tsk->thread.eip
                                              : (void *)regs->eip;
        force_sig_info(SIGTRAP, &info, tsk);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        __asm__("movl %0,%%db7"
                : /* no output */
                : "r" (0));
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF:
        regs->eflags &= ~TF_MASK;
        return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status.  0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit.  We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
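        /*
         * Example: an unmasked divide-by-zero sets bit 2 (ZE, 0x004)
         * in the status word while the corresponding control-word mask
         * bit is clear, so the expression below yields 0x004 and we
         * report FPE_FLTDIV.
         */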
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                case 0x040: /* Stack Fault */
                case 0x240: /* Stack Fault | Direction */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_irq13 = 1;
        math_error((void *)regs->eip);
}

void simd_math_error(void *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register.  Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
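        /*
         * Example: an unmasked SIMD zero-divide sets status bit 2
         * (0x004) in MXCSR while its mask bit (bit 9) is clear, so the
         * expression below yields 0x004 and we report FPE_FLTDIV.
         */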
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
                                          long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_irq13 = 1;
                simd_math_error((void *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases.  This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                die_if_kernel("cache flush denied", regs, error_code);
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                force_sig(SIGSEGV, current);
        }
}

asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
                                          long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled.
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
        struct task_struct *tsk = current;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk->used_math)
                init_fpu(tsk);
        restore_fpu(tsk);
        set_thread_flag(TIF_USEDFPU);   /* So we fnsave on switch_to() */
}

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk("math-emulation not enabled and no coprocessor found.\n");
        printk("killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */

#ifdef CONFIG_X86_F00F_BUG
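/*
 * The Pentium "F0 0F" erratum: executing the bytes F0 0F C7 C8 (a
 * lock-prefixed cmpxchg8b with a register operand, an invalid
 * encoding) hangs the CPU. Mapping the IDT read-only turns that
 * lockup into a page fault the kernel can recover from.
 */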
void __init trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        __asm__ __volatile__("lidt %0" : "=m" (idt_descr));
}
#endif

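/*
 * An IDT gate descriptor is two 32-bit words: the first holds the
 * code segment selector in its high half and offset bits 0-15 in its
 * low half; the second holds offset bits 16-31 in its high half and
 * the P/DPL/type flags in its low half. The 0x8000 below sets the
 * present bit, with dpl and type shifted into bits 13 and 8.
 */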
#define _set_gate(gate_addr,type,dpl,addr) \
do { \
        int __d0, __d1; \
        __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
                "movw %4,%%dx\n\t" \
                "movl %%eax,%0\n\t" \
                "movl %%edx,%1" \
                :"=m" (*((long *) (gate_addr))), \
                 "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
                :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
                 "3" ((char *) (addr)),"2" (__KERNEL_CS << 16)); \
} while (0)

/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
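/*
 * Gate type 14 is a 32-bit interrupt gate (the CPU clears IF on
 * entry); type 15 is a 32-bit trap gate (IF is left unchanged).
 * The system gates use DPL 3 so that int3, into/bound and the
 * int 0x80 system call can be issued from user mode.
 */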
void set_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,14,0,addr);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,0,addr);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,3,addr);
}

static void __init set_call_gate(void *a, void *addr)
{
        _set_gate(a,12,3,addr);
}

#ifdef CONFIG_EISA
int EISA_bus;
static struct resource eisa_id = { "EISA ID", 0xc80, 0xc83, IORESOURCE_BUSY };
#endif

void __init trap_init(void)
{
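        /*
         * EISA machines carry the four ASCII bytes "EISA" at physical
         * address 0x0FFFD9 in the BIOS area; isa_readl() below fetches
         * them as one little-endian long for comparison.
         */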
#ifdef CONFIG_EISA
        if (isa_readl(0x0FFFD9) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
                EISA_bus = 1;
                if (request_resource(&ioport_resource, &eisa_id) == -EBUSY)
                        printk("EISA port was EBUSY :-(\n");
        }
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif

        set_trap_gate(0,&divide_error);
        set_trap_gate(1,&debug);
        set_intr_gate(2,&nmi);
        set_system_gate(3,&int3);       /* int3-5 can be called from all */
        set_system_gate(4,&overflow);
        set_system_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_trap_gate(8,&double_fault);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_intr_gate(14,&page_fault);
        set_trap_gate(15,&spurious_interrupt_bug);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18,&machine_check);
#endif
        set_trap_gate(19,&simd_coprocessor_error);

        set_system_gate(SYSCALL_VECTOR,&system_call);

        /*
         * default LDT is a single-entry callgate to lcall7 for iBCS
         * and a callgate to lcall27 for Solaris/x86 binaries
         */
        set_call_gate(&default_ldt[0],lcall7);
        set_call_gate(&default_ldt[4],lcall27);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}