2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
4 * Copyright (c) 2018 Joyent, Inc.
8 * Copyright (c) 1989, 1990 William F. Jolitz.
9 * Copyright (c) 1990 The Regents of the University of California.
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
43 #include <sys/asm_linkage.h>
44 #include <sys/asm_misc.h>
47 #include <sys/regset.h>
48 #include <sys/privregs.h>
49 #include <sys/dtrace.h>
50 #include <sys/x86_archext.h>
51 #include <sys/traptrace.h>
52 #include <sys/machparam.h>
57 * push $0 on stack for traps that do not
58 * generate an error code. This is so the rest
59 * of the kernel can expect a consistent stack
60 * from any exception.
62 * Note that for all exceptions for amd64
63 * %r11 and %rcx are on the stack. Just pop
64 * them back into their appropriate registers and let
65 * it get saved as if running native.
68 #define TRAP_NOERR(trapno) \
72 #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
75 * error code already pushed by hw
78 #define TRAP_ERR(trapno) \
82 * These are the stacks used on cpu0 for taking double faults,
83 * NMIs and MCEs (the latter two only on amd64 where we have IST).
85 * We define them here instead of in a C file so that we can page-align
86 * them (gcc won't do that in a .c file).
/*
 * Statically-allocated exception stacks for cpu0, declared via the
 * DGDEF3(name, size, alignment) data-definition macro so they can be
 * page-aligned (MMU_PAGESIZE) as the comment above explains.
 * NOTE(review): this chunk is line-fragmented by extraction (macro
 * arguments split across lines, stray original line numbers embedded);
 * all code tokens below are preserved byte-for-byte, comments only added.
 */
/* #DF (double fault) stack for cpu0 */
89 DGDEF3
(dblfault_stack0
, DEFAULTSTKSZ
, MMU_PAGESIZE
)
90 .fill DEFAULTSTKSZ, 1, 0
/* NMI stack for cpu0 (IST-delivered on amd64, per the comment above) */
91 DGDEF3
(nmi_stack0
, DEFAULTSTKSZ
, MMU_PAGESIZE
)
92 .fill DEFAULTSTKSZ, 1, 0
/* #MC (machine check) stack for cpu0 (IST-delivered on amd64) */
93 DGDEF3
(mce_stack0
, DEFAULTSTKSZ
, MMU_PAGESIZE
)
94 .fill DEFAULTSTKSZ, 1, 0
100 TRAP_NOERR
(T_ZERODIV
) /* $0 */
107 * Fetch %dr6 and clear it, handing off the value to the
108 * cmntrap code in %r15/%esi
111 TRAP_NOERR
(T_SGLSTP
) /* $1 */
115 * If we get here as a result of single-stepping a sysenter
116 * instruction, we suddenly find ourselves taking a #db
117 * in kernel mode -before- we've swapgs'ed. So before we can
118 * take the trap, we do the swapgs here, and fix the return
119 * %rip in trap() so that we return immediately after the
120 * swapgs in the sysenter handler to avoid doing the swapgs again.
122 * Nobody said that the design of sysenter was particularly
129 * At this point the stack looks like this:
131 * (high address) r_ss
135 * r_rip <-- %rsp + 24
136 * r_err <-- %rsp + 16
137 * r_trapno <-- %rsp + 8
138 * (low address) %r11 <-- %rsp
140 leaq sys_sysenter
(%rip
), %r11
141 cmpq
%r11, 24(%rsp
) /* Compare to saved r_rip on the stack */
143 leaq brand_sys_sysenter
(%rip
), %r11
144 cmpq
%r11, 24(%rsp
) /* Compare to saved r_rip on the stack */
146 leaq tr_sys_sysenter
(%rip
), %r11
149 leaq tr_brand_sys_sysenter
(%rip
), %r11
160 #elif defined(__i386)
174 * Macro to set the gsbase or kgsbase to the address of the struct cpu
175 * for this processor. If we came from userland, set kgsbase else
176 * set gsbase. We find the proper cpu struct by looping through
177 * the cpu structs for all processors till we find a match for the gdt
178 * of the trapping processor. The stack is expected to be pointing at
179 * the standard regs pushed by hardware on a trap (plus error code and trapno).
181 * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
182 * and kgsbase set to the same value) because we're not going back the normal
183 * way out of here (via IRET). Where we're going, we don't need no user %gs.
185 #define SET_CPU_GSBASE \
186 subq $REGOFF_TRAPNO
, %rsp;
/* save regs */ \
187 movq
%rax
, REGOFF_RAX
(%rsp
); \
188 movq
%rbx
, REGOFF_RBX
(%rsp
); \
189 movq
%rcx
, REGOFF_RCX
(%rsp
); \
190 movq
%rdx
, REGOFF_RDX
(%rsp
); \
191 movq
%rbp
, REGOFF_RBP
(%rsp
); \
193 subq $
16, %rsp;
/* space for gdt */ \
195 movq
8(%rsp
), %rcx;
/* %rcx has gdt to match */ \
196 xorl
%ebx
, %ebx;
/* loop index */ \
197 leaq cpu
(%rip
), %rdx;
/* cpu pointer array */ \
199 movq
(%rdx
, %rbx
, CLONGSIZE
), %rax;
/* get cpu[i] */ \
200 cmpq $
0x0, %rax;
/* cpu[i] == NULL ? */ \
201 je
2f;
/* yes, continue */ \
202 cmpq
%rcx
, CPU_GDT
(%rax
);
/* gdt == cpu[i]->cpu_gdt ? */ \
203 je
3f;
/* yes, go set gsbase */ \
205 incl
%ebx;
/* i++ */ \
206 cmpl $NCPU
, %ebx;
/* i < NCPU ? */ \
207 jb
1b;
/* yes, loop */ \
208 /* XXX BIG trouble if we fall thru here. We didn't find a gdt match */ \
210 movl $MSR_AMD_KGSBASE
, %ecx; \
211 cmpw $KCS_SEL
, REGOFF_CS
(%rbp
);
/* trap from kernel? */ \
212 jne
4f;
/* no, go set KGSBASE */ \
213 movl $MSR_AMD_GSBASE
, %ecx;
/* yes, set GSBASE */ \
214 mfence;
/* OPTERON_ERRATUM_88 */ \
216 movq
%rax
, %rdx;
/* write base register */ \
219 movq REGOFF_RDX
(%rbp
), %rdx;
/* restore regs */ \
220 movq REGOFF_RCX
(%rbp
), %rcx; \
221 movq REGOFF_RBX
(%rbp
), %rbx; \
222 movq REGOFF_RAX
(%rbp
), %rax; \
224 movq REGOFF_RBP
(%rsp
), %rbp; \
225 addq $REGOFF_TRAPNO
, %rsp
/* pop stack */
238 TRAP_NOERR
(T_NMIFLT
) /* $2 */
243 * Save all registers and setup segment registers
244 * with kernel selectors.
247 INTGATE_INIT_KERNEL_FLAGS
249 TRACE_PTR
(%r12, %rax
, %eax
, %rdx
, $TT_TRAP
)
250 TRACE_REGS
(%r12, %rsp
, %rax
, %rbx
)
256 call av_dispatch_nmivect
263 #elif defined(__i386)
269 TRAP_NOERR
(T_NMIFLT
) /* $2 */
272 * Save all registers and setup segment registers
273 * with kernel selectors.
276 INTGATE_INIT_KERNEL_FLAGS
278 TRACE_PTR
(%edi
, %ebx
, %ebx
, %ecx
, $TT_TRAP
)
279 TRACE_REGS
(%edi
, %esp
, %ebx
, %ecx
)
285 call av_dispatch_nmivect
301 cmpw $KCS_SEL
, 8(%rsp
)
305 * This is a breakpoint in the kernel -- it is very likely that this
306 * is DTrace-induced. To unify DTrace handling, we spoof this as an
307 * invalid opcode (#UD) fault. Note that #BP is a trap, not a fault --
308 * we must decrement the trapping %rip to make it appear as a fault.
309 * We then push a non-zero error code to indicate that this is coming
313 push $
1 /* error code -- non-zero for #BP */
319 NPTRAP_NOERR
(T_BPTFLT
) /* $3 */
328 TRAP_NOERR
(T_OVFLW
) /* $4 */
336 TRAP_NOERR
(T_BOUNDFLT
) /* $5 */
346 cmpw $KCS_SEL
, 8(%rsp
)
349 push $
0 /* error code -- zero for #UD */
351 push $
0xdddd /* a dummy trap number */
353 movq REGOFF_RIP
(%rsp
), %rdi
354 movq REGOFF_RSP
(%rsp
), %rsi
355 movq REGOFF_RAX
(%rsp
), %rdx
360 ALTENTRY
(dtrace_invop_callsite
)
362 cmpl $DTRACE_INVOP_PUSHL_EBP
, %eax
364 cmpl $DTRACE_INVOP_LEAVE
, %eax
366 cmpl $DTRACE_INVOP_NOP
, %eax
368 cmpl $DTRACE_INVOP_RET
, %eax
/*
 * DTrace invalid-opcode emulation, amd64: emulate "pushq %rbp".
 * The trap frame (RIP/CS/RFLAGS/RSP/SS) is slid down 8 bytes on the
 * interrupted stack to make room for the saved %rbp, the trapping RIP
 * is advanced past the 1-byte probe instruction, and we IRET back.
 * NOTE(review): the entry label for this sequence is not visible in
 * this chunk; code tokens are preserved byte-for-byte, comments only
 * added.  Stack offsets below count from %rsp after the temp push of
 * %rax (so 24(%rsp) = saved RIP, 56(%rsp) = saved SS).
 */
374 * We must emulate a "pushq %rbp". To do this, we pull the stack
375 * down 8 bytes, and then store the base pointer.
378 subq $
16, %rsp
/* make room for %rbp */
379 pushq
%rax
/* push temp */
380 movq
24(%rsp
), %rax
/* load calling RIP */
381 addq $
1, %rax
/* increment over trapping instr */
382 movq
%rax
, 8(%rsp
) /* store calling RIP */
383 movq
32(%rsp
), %rax
/* load calling CS */
384 movq
%rax
, 16(%rsp
) /* store calling CS */
385 movq
40(%rsp
), %rax
/* load calling RFLAGS */
386 movq
%rax
, 24(%rsp
) /* store calling RFLAGS */
387 movq
48(%rsp
), %rax
/* load calling RSP */
388 subq $
8, %rax
/* make room for %rbp */
389 movq
%rax
, 32(%rsp
) /* store calling RSP */
390 movq
56(%rsp
), %rax
/* load calling SS */
391 movq
%rax
, 40(%rsp
) /* store calling SS */
/* frame is now slid down; write %rbp into the hole on the caller's stack */
392 movq
32(%rsp
), %rax
/* reload calling RSP */
393 movq
%rbp
, (%rax
) /* store %rbp there */
394 popq
%rax
/* pop off temp */
395 jmp tr_iret_kernel
/* return from interrupt */
/*
 * DTrace invalid-opcode emulation, amd64: emulate "leave"
 * (movq %rbp, %rsp; popq %rbp).  Because the iretq frame carries the
 * interrupted RSP explicitly at 32(%rsp) (after the temp push), the
 * "pop" is effected simply by storing the adjusted %rbp there — no
 * reshuffling of the rest of the trap frame is needed, as the comment
 * below notes.
 * NOTE(review): the entry label for this sequence is not visible in
 * this chunk; code tokens are preserved byte-for-byte, comments only
 * added.
 */
400 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
401 * followed by a "popq %rbp". This is quite a bit simpler on amd64
402 * than it is on i386 -- we can exploit the fact that the %rsp is
403 * explicitly saved to effect the pop without having to reshuffle
404 * the other data pushed for the trap.
407 pushq
%rax
/* push temp */
408 movq
8(%rsp
), %rax
/* load calling RIP */
409 addq $
1, %rax
/* increment over trapping instr */
410 movq
%rax
, 8(%rsp
) /* store calling RIP */
411 movq
(%rbp
), %rax
/* get new %rbp */
412 addq $
8, %rbp
/* adjust new %rsp */
413 movq
%rbp
, 32(%rsp
) /* store new %rsp */
414 movq
%rax
, %rbp
/* set new %rbp */
415 popq
%rax
/* pop off temp */
416 jmp tr_iret_kernel
/* return from interrupt */
/*
 * NOTE(review): original lines are missing between the "nop" comment
 * below and the instructions that follow it (the embedded numbering
 * jumps from 422 to 431).  The surviving code does NOT match a nop —
 * it pops a return address off the interrupted stack (load RSP, load
 * RIP through it, bump RSP by 8), which is consistent with emulating
 * "ret".  Verify against the complete source before relying on the
 * pairing of comment and code here.  Code tokens are preserved
 * byte-for-byte, comments only added.
 */
421 * We must emulate a "nop". This is obviously not hard: we need only
422 * advance the %rip by one.
431 pushq
%rax
/* push temp */
432 movq
32(%rsp
), %rax
/* load %rsp */
433 movq
(%rax
), %rax
/* load calling RIP */
434 movq
%rax
, 8(%rsp
) /* store calling RIP */
435 addq $
8, 32(%rsp
) /* adjust new %rsp */
436 popq
%rax
/* pop off temp */
437 jmp tr_iret_kernel
/* return from interrupt */
442 * We're going to let the kernel handle this as a normal #UD. If,
443 * however, we came through #BP and are spoofing #UD (in this case,
444 * the stored error value will be non-zero), we need to de-spoof
445 * the trap by incrementing %rip and pushing T_BPTFLT.
447 cmpq $
0, REGOFF_ERR
(%rsp
)
449 incq REGOFF_RIP
(%rsp
)
450 addq $REGOFF_RIP
, %rsp
451 NPTRAP_NOERR
(T_BPTFLT
) /* $3 */
455 addq $REGOFF_RIP
, %rsp
457 NPTRAP_NOERR
(T_ILLINST
)
461 #elif defined(__i386)
468 * If we are taking an invalid opcode trap while in the kernel, this
469 * is likely an FBT probe point.
472 cmpw $KGS_SEL
, (%esp
)
477 pushl
%eax
/* push %eax -- may be return value */
478 pushl
%esp
/* push stack pointer */
479 addl $
48, (%esp
) /* adjust to incoming args */
480 pushl
40(%esp
) /* push calling EIP */
482 ALTENTRY
(dtrace_invop_callsite
)
484 cmpl $DTRACE_INVOP_PUSHL_EBP
, %eax
486 cmpl $DTRACE_INVOP_POPL_EBP
, %eax
488 cmpl $DTRACE_INVOP_LEAVE
, %eax
490 cmpl $DTRACE_INVOP_NOP
, %eax
/*
 * DTrace invalid-opcode emulation, i386: emulate "pushl %ebp".
 * The 3-word iret frame (EIP/CS/EFLAGS) is slid down 4 bytes to make
 * room for %ebp, and EIP is advanced one byte past the probe
 * instruction (described below as "the LOCK prefix").  Offsets count
 * from %esp after the temp push of %eax.
 * NOTE(review): the entry label for this sequence is not visible in
 * this chunk; code tokens are preserved byte-for-byte, comments only
 * added.
 */
495 * We must emulate a "pushl %ebp". To do this, we pull the stack
496 * down 4 bytes, and then store the base pointer.
499 subl $
4, %esp
/* make room for %ebp */
500 pushl
%eax
/* push temp */
501 movl
8(%esp
), %eax
/* load calling EIP */
502 incl
%eax
/* increment over LOCK prefix */
503 movl
%eax
, 4(%esp
) /* store calling EIP */
504 movl
12(%esp
), %eax
/* load calling CS */
505 movl
%eax
, 8(%esp
) /* store calling CS */
506 movl
16(%esp
), %eax
/* load calling EFLAGS */
507 movl
%eax
, 12(%esp
) /* store calling EFLAGS */
508 movl
%ebp
, 16(%esp
) /* push %ebp */
509 popl
%eax
/* pop off temp */
/*
 * DTrace invalid-opcode emulation, i386: emulate "popl %ebp".
 * The inverse of the pushl emulation above: %ebp is loaded from the
 * interrupted stack and the saved EIP/CS/EFLAGS frame is squeezed up
 * 4 bytes over it; EIP is advanced one byte past the probe
 * instruction.  Offsets count from %esp after the temp push of %eax.
 * NOTE(review): the entry label for this sequence is not visible in
 * this chunk; code tokens are preserved byte-for-byte, comments only
 * added.
 */
513 * We must emulate a "popl %ebp". To do this, we do the opposite of
514 * the above: we remove the %ebp from the stack, and squeeze up the
515 * saved state from the trap.
518 pushl
%eax
/* push temp */
519 movl
16(%esp
), %ebp
/* pop %ebp */
520 movl
12(%esp
), %eax
/* load calling EFLAGS */
521 movl
%eax
, 16(%esp
) /* store calling EFLAGS */
522 movl
8(%esp
), %eax
/* load calling CS */
523 movl
%eax
, 12(%esp
) /* store calling CS */
524 movl
4(%esp
), %eax
/* load calling EIP */
525 incl
%eax
/* increment over LOCK prefix */
526 movl
%eax
, 8(%esp
) /* store calling EIP */
527 popl
%eax
/* pop off temp */
528 addl $
4, %esp
/* adjust stack pointer */
/*
 * DTrace invalid-opcode emulation, i386: emulate "leave"
 * (movl %ebp, %esp; popl %ebp).  Unlike amd64, the i386 trap frame
 * does not carry the interrupted ESP, so the EIP/CS/EFLAGS frame must
 * be rebuilt just below the old %ebp (at (%ebx), -4(%ebx), -8(%ebx))
 * and %esp redirected there; this needs two temporaries (%eax staging,
 * %ebx = old %ebp), as the comment below says.
 * NOTE(review): the entry label for this sequence is not visible in
 * this chunk, and the final writes rely on data below %esp surviving
 * until the "movl -12(%esp), %esp" — preserved exactly as written.
 * Code tokens are byte-for-byte; comments only added.
 */
532 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
533 * followed by a "popl %ebp". This looks similar to the above, but
534 * requires two temporaries: one for the new base pointer, and one
535 * for the staging register.
538 pushl
%eax
/* push temp */
539 pushl
%ebx
/* push temp */
540 movl
%ebp
, %ebx
/* set temp to old %ebp */
541 movl
(%ebx
), %ebp
/* pop %ebp */
542 movl
16(%esp
), %eax
/* load calling EFLAGS */
543 movl
%eax
, (%ebx
) /* store calling EFLAGS */
544 movl
12(%esp
), %eax
/* load calling CS */
545 movl
%eax
, -4(%ebx
) /* store calling CS */
546 movl
8(%esp
), %eax
/* load calling EIP */
547 incl
%eax
/* increment over LOCK prefix */
548 movl
%eax
, -8(%ebx
) /* store calling EIP */
549 movl
%ebx
, -4(%esp
) /* temporarily store new %esp */
550 popl
%ebx
/* pop off temp */
551 popl
%eax
/* pop off temp */
552 movl
-12(%esp
), %esp
/* set stack pointer */
553 subl $
8, %esp
/* adjust for three pushes, one pop */
557 * We must emulate a "nop". This is obviously not hard: we need only
558 * advance the %eip by one.
563 IRET
/* return from interrupt */
567 pushl $T_ILLINST
/* $6 */
572 pushl $T_ILLINST
/* $6 */
583 TRAP_NOERR
(T_NOEXTFLT
) /* $0 */
598 * We share this handler with kmdb (if kmdb is loaded). As such, we
599 * may have reached this point after encountering a #df in kmdb. If
600 * that happens, we'll still be on kmdb's IDT. We need to switch back
601 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
602 * here from kmdb, kmdb is probably in a very sickly state, and
603 * shouldn't be entered from the panic flow. We'll suppress that
604 * entry by setting nopanicdebug.
607 subq $DESCTBR_SIZE
, %rsp
609 movq
%gs
:CPU_IDT
, %rax
610 cmpq
%rax
, DTR_BASE
(%rsp
)
613 movq
%rax
, DTR_BASE
(%rsp
)
614 movw $_MUL
(NIDT
, GATE_DESC_SIZE
), DTR_LIMIT
(%rsp
)
617 movl $
1, nopanicdebug
619 1: addq $DESCTBR_SIZE
, %rsp
628 leaq trap_trace_freeze
(%rip
), %r11
634 movq
%rsp
, %rdi
/* ®s */
635 xorl
%esi
, %esi
/* clear address */
636 xorl
%edx
, %edx
/* cpuid = 0 */
641 #elif defined(__i386)
647 cli /* disable interrupts */
650 * We share this handler with kmdb (if kmdb is loaded). As such, we
651 * may have reached this point after encountering a #df in kmdb. If
652 * that happens, we'll still be on kmdb's IDT. We need to switch back
653 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
654 * here from kmdb, kmdb is probably in a very sickly state, and
655 * shouldn't be entered from the panic flow. We'll suppress that
656 * entry by setting nopanicdebug.
659 subl $DESCTBR_SIZE
, %esp
660 movl
%gs
:CPU_IDT
, %eax
662 cmpl DTR_BASE
(%esp
), %eax
665 movl
%eax
, DTR_BASE
(%esp
)
666 movw $_MUL
(NIDT
, GATE_DESC_SIZE
), DTR_LIMIT
(%esp
)
669 movl $
1, nopanicdebug
671 1: addl $DESCTBR_SIZE
, %esp
674 * Check the CPL in the TSS to see what mode
675 * (user or kernel) we took the fault in. At this
676 * point we are running in the context of the double
677 * fault task (dftss) but the CPU's task points to
678 * the previous task (ktss) where the process context
679 * has been saved as the result of the task switch.
681 movl
%gs
:CPU_TSS
, %eax
/* get the TSS */
682 movl TSS_SS
(%eax
), %ebx
/* save the fault SS */
683 movl TSS_ESP
(%eax
), %edx
/* save the fault ESP */
684 testw $CPL_MASK
, TSS_CS
(%eax
) /* user mode ? */
686 movw TSS_SS0
(%eax
), %ss
/* get on the kernel stack */
687 movl TSS_ESP0
(%eax
), %esp
690 * Clear the NT flag to avoid a task switch when the process
691 * finally pops the EFL off the stack via an iret. Clear
692 * the TF flag since that is what the processor does for
693 * a normal exception. Clear the IE flag so that interrupts
696 movl TSS_EFL
(%eax
), %ecx
697 andl $_BITNOT
(PS_NT|PS_T|PS_IE
), %ecx
699 popfl
/* restore the EFL */
700 movw TSS_LDT
(%eax
), %cx
/* restore the LDT */
704 * Restore process segment selectors.
706 movw TSS_DS
(%eax
), %ds
707 movw TSS_ES
(%eax
), %es
708 movw TSS_FS
(%eax
), %fs
709 movw TSS_GS
(%eax
), %gs
712 * Restore task segment selectors.
714 movl $KDS_SEL
, TSS_DS
(%eax
)
715 movl $KDS_SEL
, TSS_ES
(%eax
)
716 movl $KDS_SEL
, TSS_SS
(%eax
)
717 movl $KFS_SEL
, TSS_FS
(%eax
)
718 movl $KGS_SEL
, TSS_GS
(%eax
)
721 * Clear the TS bit, the busy bits in both task
722 * descriptors, and switch tasks.
726 movl DFTSS_SEL+
4(%ecx
), %esi
727 andl $_BITNOT
(0x200), %esi
728 movl
%esi
, DFTSS_SEL+
4(%ecx
)
729 movl KTSS_SEL+
4(%ecx
), %esi
730 andl $_BITNOT
(0x200), %esi
731 movl
%esi
, KTSS_SEL+
4(%ecx
)
736 * Restore part of the process registers.
738 movl TSS_EBP
(%eax
), %ebp
739 movl TSS_ECX
(%eax
), %ecx
740 movl TSS_ESI
(%eax
), %esi
741 movl TSS_EDI
(%eax
), %edi
745 * Make a trap frame. Leave the error code (0) on
746 * the stack since the first word on a trap stack is
749 pushl
%ebx
/ fault SS
750 pushl
%edx
/ fault ESP
751 pushl TSS_EFL
(%eax
) / fault EFL
752 pushl TSS_CS
(%eax
) / fault CS
753 pushl TSS_EIP
(%eax
) / fault EIP
754 pushl $
0 / error code
755 pushl $T_DBLFLT
/ trap number
8
756 movl TSS_EBX
(%eax
), %ebx
/ restore EBX
757 movl TSS_EDX
(%eax
), %edx
/ restore EDX
758 movl TSS_EAX
(%eax
), %eax
/ restore EAX
759 sti
/ enable interrupts
769 TRAP_ERR
(T_TSSFLT
) /* $10 already have error code on stack */
777 TRAP_ERR
(T_SEGFLT
) /* $11 already have error code on stack */
788 TRAP_ERR
(T_STKFLT
) /* $12 already have error code on stack */
799 TRAP_ERR
(T_GPFLT
) /* $13 already have error code on stack */
810 TRAP_ERR
(T_PGFLT
) /* $14 already have error code on stack */
815 #elif defined(__i386)
822 #if !defined(__amd64)
824 .globl idt0_default_r
827 * #PF pentium bug workaround
829 ENTRY_NP
(pentium_pftrap
)
832 andl $MMU_STD_PAGEMASK
, %eax
834 cmpl %eax
, %cs
:idt0_default_r+
2 /* fixme */
836 je check_for_user_address
839 pushl $T_PGFLT
/* $14 */
841 check_for_user_address
:
843 * Before we assume that we have an unmapped trap on our hands,
844 * check to see if this is a fault from user mode. If it is,
845 * we'll kick back into the page fault handler.
847 movl
4(%esp
), %eax
/* error code */
848 andl $PF_ERR_USER
, %eax
852 * We now know that this is the invalid opcode trap.
855 addl $
4, %esp
/* pop error code */
857 SET_SIZE
(pentium_pftrap
)
859 #endif /* !__amd64 */
862 TRAP_NOERR
(T_RESVTRAP
) /* (reserved) */
870 TRAP_NOERR
(T_EXTERRFLT
) /* $16 */
878 TRAP_ERR
(T_ALIGNMENT
) /* $17 */
885 .globl cmi_mca_trap /* see uts/i86pc/os/cmi.c */
890 TRAP_NOERR
(T_MCE
) /* $18 */
895 INTGATE_INIT_KERNEL_FLAGS
897 TRACE_PTR
(%rdi
, %rbx
, %ebx
, %rcx
, $TT_TRAP
)
898 TRACE_REGS
(%rdi
, %rsp
, %rbx
, %rcx
)
903 movq
%rsp
, %rdi
/* arg0 = struct regs *rp */
904 call cmi_mca_trap
/* cmi_mca_trap(rp); */
912 TRAP_NOERR
(T_MCE
) /* $18 */
915 INTGATE_INIT_KERNEL_FLAGS
917 TRACE_PTR
(%edi
, %ebx
, %ebx
, %ecx
, $TT_TRAP
)
918 TRACE_REGS
(%edi
, %esp
, %ebx
, %ecx
)
924 pushl
%ecx
/* arg0 = struct regs *rp */
925 call cmi_mca_trap
/* cmi_mca_trap(rp) */
926 addl $
4, %esp
/* pop arg0 */
937 TRAP_NOERR
(T_SIMDFPE
) /* $19 */
942 TRAP_NOERR
(T_INVALTRAP
) /* very invalid */
951 cmpl $T_LASTFAST
, %eax
953 orl
%eax
, %eax
/* (zero extend top 32-bits) */
954 leaq fasttable
(%rip
), %r11
955 leaq
(%r11, %rax
, CLONGSIZE
), %r11
959 * Fast syscall number was illegal. Make it look
960 * as if the INT failed. Modify %rip to point before the
961 * INT, push the expected error code and fake a GP fault.
963 * XXX Why make the error code be offset into idt + 1?
964 * Instead we should push a real (soft?) error code
965 * on the stack and #gp handler could know about fasttraps?
969 subq $
2, (%rsp
) /* XXX int insn 2-bytes */
970 pushq $_CONST
(_MUL
(T_FASTTRAP
, GATE_DESC_SIZE
) + 2)
975 #elif defined(__i386)
978 cmpl $T_LASTFAST
, %eax
980 jmp
*%cs
:fasttable
(, %eax
, CLONGSIZE
)
983 * Fast syscall number was illegal. Make it look
984 * as if the INT failed. Modify %eip to point before the
985 * INT, push the expected error code and fake a GP fault.
987 * XXX Why make the error code be offset into idt + 1?
988 * Instead we should push a real (soft?) error code
989 * on the stack and #gp handler could know about fasttraps?
991 subl $
2, (%esp
) /* XXX int insn 2-bytes */
992 pushl $_CONST
(_MUL
(T_FASTTRAP
, GATE_DESC_SIZE
) + 2)
999 TRAP_NOERR
(T_DTRACE_RET
)
1001 SET_SIZE
(dtrace_ret
)
1003 #if defined(__amd64)
1006 * RFLAGS 24 bytes up the stack from %rsp.
1007 * XXX a constant would be nicer.
1011 orq $PS_C
, 24(%rsp
) /* set carry bit in user flags */
1016 #elif defined(__i386)
1019 orw $PS_C
, 8(%esp
) /* set carry bit in user flags */
1026 * Interrupts start at 32