/*
 * linux/arch/i386/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/ioport.h>
#include <linux/mca.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/pgalloc.h>
#include <asm/arch_hooks.h>

#include <linux/irq.h>
#include <linux/module.h>
asmlinkage int system_call(void);
asmlinkage void lcall7(void);
asmlinkage void lcall27(void);
struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
		{ 0, 0 }, { 0, 0 } };
/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
/*
 * If the address is either in the .text section of the
 * kernel, or in the vmalloc'ed module regions, it *may*
 * be the address of a calling routine.
 */

#ifdef CONFIG_MODULES

/* FIXME: Accessed without a lock --RR */
extern struct list_head modules;

static inline int kernel_text_address(unsigned long addr)
{
	int retval = 0;
	struct module *mod;

	if (addr >= (unsigned long) &_stext &&
	    addr <= (unsigned long) &_etext)
		return 1;

	list_for_each_entry(mod, &modules, list) {
		/* mod_bound tests for addr being inside the vmalloc'ed
		 * module area. Of course it'd be better to test only
		 * for the .text subset... */
		if (mod_bound((void *)addr, 0, mod)) {
			retval = 1;
			break;
		}
	}
	return retval;
}

#else

static inline int kernel_text_address(unsigned long addr)
{
	return (addr >= (unsigned long) &_stext &&
		addr <= (unsigned long) &_etext);
}

#endif
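/*
 * Note: _stext and _etext are linker-script symbols bounding the kernel's
 * .text section, so the check above answers "could this value plausibly be
 * a kernel (or module) code address?".  It is only a heuristic filter for
 * the stack dumpers below, not a guarantee that the value really is a
 * return address.
 */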
void show_trace(unsigned long * stack)
{
	unsigned long addr;

	if (!stack)
		stack = (unsigned long*)&stack;

	printk("Call Trace:");
	while (((long) stack & (THREAD_SIZE-1)) != 0) {
		addr = *stack++;
		if (kernel_text_address(addr)) {
			printk(" [<%08lx>] ", addr);
			print_symbol("%s\n", addr);
		}
	}
	printk("\n");
}
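/*
 * The kernel stack is THREAD_SIZE-aligned, so the scan in show_trace()
 * simply walks word by word from the given stack pointer until the address
 * wraps onto the next THREAD_SIZE boundary (the top of the stack), printing
 * every word that looks like kernel text as a possible return address.
 * False positives are therefore expected in the resulting trace.
 */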
void show_trace_task(struct task_struct *tsk)
{
	unsigned long esp = tsk->thread.esp;

	/* User space on another CPU? */
	if ((esp ^ (unsigned long)tsk->thread_info) & (PAGE_MASK<<1))
		return;
	show_trace((unsigned long *)esp);
}
void show_stack(unsigned long * esp)
{
	unsigned long *stack;
	int i;

	// debugging aid: "show_stack(NULL);" prints the
	// back trace for this cpu.

	if (esp == NULL)
		esp = (unsigned long*)&esp;

	stack = esp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i % 8) == 0))
			printk("\n       ");
		printk("%08lx ", *stack++);
	}
	printk("\n");
	show_trace(esp);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	show_stack(0);
}
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss;

	esp = (unsigned long) (&regs->esp);
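	/*
	 * For a fault raised in kernel mode the CPU does not push ss/esp,
	 * so the saved frame ends just before the esp slot; the address of
	 * regs->esp is therefore the kernel stack pointer at the time of
	 * the fault.  The user-mode case below uses the pushed values
	 * instead.
	 */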
	ss = __KERNEL_DS;
	if (regs->xcs & 3) {
		in_kernel = 0;
		esp = regs->esp;
		ss = regs->xss & 0xffff;
	}
	printk("CPU: %d\nEIP: %04x:[<%08lx>] %s\nEFLAGS: %08lx\n",
		smp_processor_id(), 0xffff & regs->xcs, regs->eip, print_tainted(), regs->eflags);
	print_symbol("EIP is at %s\n", regs->eip);
	printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk("ds: %04x es: %04x ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, ss);
	printk("Process %s (pid: %d, threadinfo=%p task=%p)",
		current->comm, current->pid, current_thread_info(), current);
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("\nStack: ");
		show_stack((unsigned long*)esp);

		printk("Code: ");
		if (regs->eip < PAGE_OFFSET)
			goto bad;

		for (i = 0; i < 20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char*)regs->eip)[i])) {
bad:
				printk(" Bad EIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}
static void handle_BUG(struct pt_regs *regs)
{
	unsigned short ud2;
	unsigned short line;
	char *file;
	char c;
	unsigned long eip;

	if (regs->xcs & 3)
		goto no_bug;		/* Not in kernel */

	eip = regs->eip;

	if (eip < PAGE_OFFSET)
		goto no_bug;
	if (__get_user(ud2, (unsigned short *)eip))
		goto no_bug;
	if (ud2 != 0x0b0f)
		goto no_bug;
	if (__get_user(line, (unsigned short *)(eip + 2)))
		goto bug;
	if (__get_user(file, (char **)(eip + 4)) ||
	    (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
		file = "<bad filename>";

	printk("------------[ cut here ]------------\n");
	printk("kernel BUG at %s:%d!\n", file, line);

no_bug:
	return;

	/* Here we know it was a BUG but file-n-line is unavailable */
bug:
	printk("Kernel BUG\n");
}
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;

void die(const char * str, struct pt_regs * regs, long err)
{
	spin_lock_irq(&die_lock);
	printk("%s: %04lx\n", str, err & 0xffff);
	show_registers(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
		die(str, regs, err);
}
static inline unsigned long get_cr2(void)
{
	unsigned long address;

	/* get the address */
	__asm__("movl %%cr2,%0":"=r" (address));
	return address;
}
static void inline do_trap(int trapnr, int signr, char *str, int vm86,
			   struct pt_regs * regs, long error_code, siginfo_t *info)
{
	if (vm86 && regs->eflags & VM_MASK)
		goto vm86_trap;

	if (!(regs->xcs & 3))
		goto kernel_trap;

	trap_signal: {
		struct task_struct *tsk = current;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	kernel_trap: {
#ifdef CONFIG_PNPBIOS
		if (unlikely((regs->xcs | 8) == 0x88)) {	/* 0x80 or 0x88 */
			extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
			extern u32 pnp_bios_is_utter_crap;
			pnp_bios_is_utter_crap = 1;
			printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
			__asm__ volatile(
				/* ... */
				: "=a" (pnp_bios_fault_esp), "=b" (pnp_bios_fault_eip));
			panic("do_trap: can't hit this");
		}
#endif
		{
			unsigned long fixup = search_exception_table(regs->eip);
			if (fixup)
				regs->eip = fixup;
			else
				die(str, regs, error_code);
		}
		return;
	}

	vm86_trap: {
		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
		if (ret) goto trap_signal;
		return;
	}
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void *)siaddr; \
	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void *)siaddr; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
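/*
 * As a worked example (just the macro above written out by hand, not extra
 * code of its own), DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
 * expands to roughly
 *
 *	asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
 *	{
 *		do_trap(8, SIGSEGV, "double fault", 0, regs, error_code, NULL);
 *	}
 *
 * so each DO_*ERROR* line below defines one trap handler entry point.
 */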
DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, get_cr2())
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
	if (regs->eflags & VM_MASK)
		goto gp_in_vm86;

	if (!(regs->xcs & 3))
		goto gp_in_kernel;

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;
	force_sig(SIGSEGV, current);
	return;

gp_in_vm86:
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	{
		unsigned long fixup;
		fixup = search_exception_table(regs->eip);
		if (fixup) {
			regs->eip = fixup;
			return;
		}
		die("general protection fault", regs, error_code);
	}
}
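/*
 * The NMI handlers below use I/O port 0x61, the PC/AT NMI status and
 * control register: on a read, bit 7 reports a memory (RAM) parity error
 * and bit 6 an I/O channel check (IOCHK); writing the value back with bit 2
 * or bit 3 set clears and temporarily disables the corresponding error
 * line.  (Standard PC/AT port semantics, summarised here for context.)
 */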
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
	printk("You probably have a hardware problem with your RAM chips\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
	unsigned long i;

	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i) udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
	/* Might actually be able to figure out what the guilty party
	 * is. */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}
static void default_do_nmi(struct pt_regs * regs)
{
	unsigned char reason = inb(0x61);

	if (!(reason & 0xc0)) {
#if CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog) {
			nmi_watchdog_tick(regs);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	outb(0x8f, 0x70);
	inb(0x71);		/* dummy */
	outb(0x0f, 0x70);
	inb(0x71);		/* dummy */
}
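/*
 * The re-assert sequence at the end of default_do_nmi() relies on bit 7 of
 * CMOS index port 0x70: setting it masks NMI, clearing it unmasks NMI
 * again, so a still-pending edge-triggered NMI is regenerated.  The
 * inb(0x71) reads are only dummy accesses to the CMOS data port after
 * selecting an index.
 */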
static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;
asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu;

	cpu = smp_processor_id();

	if (!nmi_callback(regs, cpu))
		default_do_nmi(regs);
}
void set_nmi_callback(nmi_callback_t callback)
{
	nmi_callback = callback;
}

void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
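/*
 * set_nmi_callback()/unset_nmi_callback() let another subsystem (for
 * example an NMI-based profiler or watchdog driver) hook NMI delivery:
 * do_nmi() above calls the registered callback first and only falls back
 * to the default port-0x61 handling when the callback returns 0, as the
 * dummy callback always does.
 */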
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
	unsigned int condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg[7])
			goto clear_dr7;
	}

	if (regs->eflags & VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg[6] = condition;

	/* Mask out spurious TF errors due to lazy TF clearing */
	if (condition & DR_STEP) {
		/*
		 * The TF error should be masked out only if the current
		 * process is not traced and if the TRAP flag has been set
		 * previously by a tracing process (condition detected by
		 * the PT_DTRACE flag); remember that the i386 TRAP flag
		 * can be modified by the process itself in user mode,
		 * allowing programs to debug themselves without the ptrace()
		 * interface.
		 */
		if ((regs->xcs & 3) == 0)
			goto clear_TF;
		if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
			goto clear_TF;
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;

	/* If this is a kernel mode trap, save the user PC on entry to
	 * the kernel, that's what the debugger can make sense of.
	 */
	info.si_addr = ((regs->xcs & 3) == 0) ? (void *)tsk->thread.eip :
						(void *)regs->eip;
	force_sig_info(SIGTRAP, &info, tsk);

	/* Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	__asm__("movl %0,%%db7"
		: /* no output */
		: "r" (0));
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF:
	regs->eflags &= ~TF_MASK;
	return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status. 0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit. We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
		case 0x001: /* Invalid Op */
		case 0x040: /* Stack Fault */
		case 0x240: /* Stack Fault | Direction */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
	math_error((void *)regs->eip);
}
void simd_math_error(void *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register. Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
					  long error_code)
{
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		simd_math_error((void *)regs->eip);
	} else {
		/*
		 * Handle strange cache flush from user space exception
		 * in all other cases. This is undocumented behaviour.
		 */
		if (regs->eflags & VM_MASK) {
			handle_vm86_fault((struct kernel_vm86_regs *)regs,
					  error_code);
			return;
		}
		die_if_kernel("cache flush denied", regs, error_code);
		current->thread.trap_no = 19;
		current->thread.error_code = error_code;
		force_sig(SIGSEGV, current);
	}
}
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
					  long error_code)
{
#if 0
	/* No need to warn about this any longer. */
	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled.
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
	struct task_struct *tsk = current;
	clts();				/* Allow maths ops (or we recurse) */

	if (!tsk->used_math)
		init_fpu(tsk);
	restore_fpu(tsk);
	set_thread_flag(TIF_USEDFPU);	/* So we fnsave on switch_to() */
}
#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n", current->comm);
	force_sig(SIGFPE, current);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	__asm__ __volatile__("lidt %0": "=m" (idt_descr));
}
#endif
#define _set_gate(gate_addr,type,dpl,addr) \
do { \
  int __d0, __d1; \
  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
	"movw %4,%%dx\n\t" \
	"movl %%eax,%0\n\t" \
	"movl %%edx,%1" \
	:"=m" (*((long *) (gate_addr))), \
	 "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
	:"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
	 "3" ((char *) (addr)),"2" (__KERNEL_CS << 16)); \
} while (0)
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,14,0,addr);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,15,0,addr);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,15,3,addr);
}

static void __init set_call_gate(void *a, void *addr)
{
	_set_gate(a,12,3,addr);
}
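/*
 * Gate type 14 is an interrupt gate (the handler runs with interrupts
 * disabled), type 15 is a trap gate (IF is left unchanged) and type 12 is
 * a call gate.  DPL 0 keeps a vector reserved for hardware/kernel use,
 * while DPL 3 (set_system_gate, set_call_gate) allows user space to
 * trigger the entry directly, e.g. int3, into, bound and the system-call
 * vector.
 */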
#ifdef CONFIG_EISA
static struct resource eisa_id = { "EISA ID", 0xc80, 0xc83, IORESOURCE_BUSY };
#endif
void __init trap_init(void)
{
#ifdef CONFIG_EISA
	if (isa_readl(0x0FFFD9) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
		EISA_bus = 1;
		if (request_resource(&ioport_resource, &eisa_id) == -EBUSY)
			printk ("EISA port was EBUSY :-(\n");
	}
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	init_apic_mappings();
#endif

	set_trap_gate(0,&divide_error);
	set_trap_gate(1,&debug);
	set_intr_gate(2,&nmi);
	set_system_gate(3,&int3);	/* int3-5 can be called from all */
	set_system_gate(4,&overflow);
	set_system_gate(5,&bounds);
	set_trap_gate(6,&invalid_op);
	set_trap_gate(7,&device_not_available);
	set_trap_gate(8,&double_fault);
	set_trap_gate(9,&coprocessor_segment_overrun);
	set_trap_gate(10,&invalid_TSS);
	set_trap_gate(11,&segment_not_present);
	set_trap_gate(12,&stack_segment);
	set_trap_gate(13,&general_protection);
	set_intr_gate(14,&page_fault);
	set_trap_gate(15,&spurious_interrupt_bug);
	set_trap_gate(16,&coprocessor_error);
	set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
	set_trap_gate(18,&machine_check);
#endif
	set_trap_gate(19,&simd_coprocessor_error);

	set_system_gate(SYSCALL_VECTOR,&system_call);
	/*
	 * default LDT is a single-entry callgate to lcall7 for iBCS
	 * and a callgate to lcall27 for Solaris/x86 binaries
	 */
	set_call_gate(&default_ldt[0],lcall7);
	set_call_gate(&default_ldt[4],lcall27);

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();

	trap_init_hook();
}