/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/dtrace.h>
#include <sys/x86_archext.h>
#include <sys/traptrace.h>
#include <sys/machparam.h>

#include "assym.h"

/*
 * push $0 on stack for traps that do not
 * generate an error code. This is so the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 *
 * Note that for all exceptions for amd64
 * %r11 and %rcx are on the stack. Just pop
 * them back into their appropriate registers and let
 * it get saved as is running native.
 */
#define	TRAP_NOERR(trapno)	\
	push	$0;		\
	push	$trapno

#define	NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	push	$trapno
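
/*
 * Illustrative sketch (not in the original source): after either macro the
 * stack handed to the common trap code has the same shape on amd64.  For a
 * trap that supplies no hardware error code:
 *
 *	(high address)	r_ss
 *			r_rsp
 *			r_rfl
 *			r_cs
 *			r_rip
 *			0		<- pushed by TRAP_NOERR
 *	(low address)	trapno		<- pushed by TRAP_NOERR
 *
 * TRAP_ERR produces the same layout, with the hardware-supplied error code
 * in place of the explicit $0.
 */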

/*
 * These are the stacks used on cpu0 for taking double faults,
 * NMIs and MCEs (the latter two only on amd64 where we have IST).
 *
 * We define them here instead of in a C file so that we can page-align
 * them (gcc won't do that in a .c file).
 */
	.data
	DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
	.fill	DEFAULTSTKSZ, 1, 0
	DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
	.fill	DEFAULTSTKSZ, 1, 0
	DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
	.fill	DEFAULTSTKSZ, 1, 0
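
/*
 * Sketch only (not part of the original source): each definition above is
 * roughly what the following C would declare, if the compiler could be
 * relied on to honor a page-sized alignment for a .c-file object:
 *
 *	char dblfault_stack0[DEFAULTSTKSZ]
 *	    __attribute__((aligned(MMU_PAGESIZE)));
 *
 * i.e. a zero-filled stack of DEFAULTSTKSZ bytes on an MMU_PAGESIZE boundary.
 */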

	/*
	 * #DE
	 */
	ENTRY_NP(div0trap)
	TRAP_NOERR(T_ZERODIV)	/* $0 */
	jmp	cmntrap
	SET_SIZE(div0trap)

	/*
	 * #DB
	 *
	 * Fetch %dr6 and clear it, handing off the value to the
	 * cmntrap code in %r15/%esi
	 */
	ENTRY_NP(dbgtrap)
	TRAP_NOERR(T_SGLSTP)	/* $1 */

#if defined(__amd64)
	/*
	 * If we get here as a result of single-stepping a sysenter
	 * instruction, we suddenly find ourselves taking a #db
	 * in kernel mode -before- we've swapgs'ed. So before we can
	 * take the trap, we do the swapgs here, and fix the return
	 * %rip in trap() so that we return immediately after the
	 * swapgs in the sysenter handler to avoid doing the swapgs again.
	 *
	 * Nobody said that the design of sysenter was particularly
	 * elegant, did they?
	 */

	pushq	%r11

	/*
	 * At this point the stack looks like this:
	 *
	 * (high address)	r_ss
	 *			r_rsp
	 *			r_rfl
	 *			r_cs
	 *			r_rip		<-- %rsp + 24
	 *			r_err		<-- %rsp + 16
	 *			r_trapno	<-- %rsp + 8
	 * (low address)	%r11		<-- %rsp
	 */
	leaq	sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
	je	1f
	leaq	brand_sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
	je	1f
	leaq	tr_sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)
	je	1f
	leaq	tr_brand_sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)
	jne	2f
1:	SWAPGS
2:	popq	%r11

	INTR_PUSH
	movq	%db6, %r15
	xorl	%eax, %eax
	movq	%rax, %db6

#elif defined(__i386)

	INTR_PUSH
	movl	%db6, %esi
	xorl	%eax, %eax
	movl	%eax, %db6
#endif	/* __i386 */

	jmp	cmntrap_pushed
	SET_SIZE(dbgtrap)

#if defined(__amd64)

/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor. If we came from userland, set kgsbase else
 * set gsbase. We find the proper cpu struct by looping through
 * the cpu structs for all processors till we find a match for the gdt
 * of the trapping processor. The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 *
 * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
 * and kgsbase set to the same value) because we're not going back the normal
 * way out of here (via IRET). Where we're going, we don't need no user %gs.
 */
#define	SET_CPU_GSBASE						\
	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */		\
	movq	%rax, REGOFF_RAX(%rsp);				\
	movq	%rbx, REGOFF_RBX(%rsp);				\
	movq	%rcx, REGOFF_RCX(%rsp);				\
	movq	%rdx, REGOFF_RDX(%rsp);				\
	movq	%rbp, REGOFF_RBP(%rsp);				\
	movq	%rsp, %rbp;					\
	subq	$16, %rsp;		/* space for gdt */	\
	sgdt	6(%rsp);					\
	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
	xorl	%ebx, %ebx;		/* loop index */	\
	leaq	cpu(%rip), %rdx;	/* cpu pointer array */	\
1:								\
	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */	\
	je	2f;			/* yes, continue */	\
	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
	je	3f;			/* yes, go set gsbase */	\
2:								\
	incl	%ebx;			/* i++ */		\
	cmpl	$NCPU, %ebx;		/* i < NCPU ? */	\
	jb	1b;			/* yes, loop */		\
/* XXX BIG trouble if we fall thru here. We didn't find a gdt match */	\
3:								\
	movl	$MSR_AMD_KGSBASE, %ecx;				\
	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
	jne	4f;			/* no, go set KGSBASE */	\
	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */	\
	mfence;				/* OPTERON_ERRATUM_88 */	\
4:								\
	movq	%rax, %rdx;		/* write base register */	\
	shrq	$32, %rdx;					\
	wrmsr;							\
	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */	\
	movq	REGOFF_RCX(%rbp), %rcx;				\
	movq	REGOFF_RBX(%rbp), %rbx;				\
	movq	REGOFF_RAX(%rbp), %rax;				\
	movq	%rbp, %rsp;					\
	movq	REGOFF_RBP(%rsp), %rbp;				\
	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */
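
/*
 * Rough C-level sketch of what SET_CPU_GSBASE does (illustrative only, not
 * part of the original source); "gdt" stands for the base address read back
 * by sgdt, and "user" for a saved %cs that is not KCS_SEL:
 *
 *	for (i = 0; i < NCPU; i++) {
 *		if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdt)
 *			break;
 *	}
 *	wrmsr(user ? MSR_AMD_KGSBASE : MSR_AMD_GSBASE, (uint64_t)cpu[i]);
 */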

#endif	/* __amd64 */

#if defined(__amd64)

	/*
	 * #NMI
	 *
	 * XXPV: See 6532669.
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	SET_CPU_GSBASE

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
	TRACE_REGS(%r12, %rsp, %rax, %rbx)
	TRACE_STAMP(%r12)

	movq	%rsp, %rbp

	movq	%rbp, %rdi
	call	av_dispatch_nmivect

	INTR_POP
	jmp	tr_iret_auto
	/*NOTREACHED*/
	SET_SIZE(nmiint)

#elif defined(__i386)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	pushl	%ebp
	call	av_dispatch_nmivect
	addl	$4, %esp

	INTR_POP_USER
	IRET
	SET_SIZE(nmiint)

#endif	/* __i386 */

	/*
	 * #BP
	 */
	ENTRY_NP(brktrap)

#if defined(__amd64)
	XPV_TRAP_POP
	cmpw	$KCS_SEL, 8(%rsp)
	jne	bp_user

	/*
	 * This is a breakpoint in the kernel -- it is very likely that this
	 * is DTrace-induced. To unify DTrace handling, we spoof this as an
	 * invalid opcode (#UD) fault. Note that #BP is a trap, not a fault --
	 * we must decrement the trapping %rip to make it appear as a fault.
	 * We then push a non-zero error code to indicate that this is coming
	 * from #BP.
	 */
	decq	(%rsp)
	push	$1		/* error code -- non-zero for #BP */
	jmp	ud_kernel

bp_user:
#endif	/* __amd64 */

	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	dtrace_trap

	SET_SIZE(brktrap)
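
/*
 * Kernel #BP handling flow, as a sketch (added for clarity; not in the
 * original source):
 *
 *	brktrap (kernel %cs)
 *	  -> decrement %rip, push error code 1
 *	  -> ud_kernel -> dtrace_invop()
 *	       -> emulate push/leave/nop/ret if DTrace claims the trap
 *	       -> otherwise ud_trap, which sees the non-zero error code,
 *	          re-increments %rip and hands a T_BPTFLT to cmntrap.
 */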

	/*
	 * #OF
	 */
	ENTRY_NP(ovflotrap)
	TRAP_NOERR(T_OVFLW)	/* $4 */
	jmp	cmntrap
	SET_SIZE(ovflotrap)

	/*
	 * #BR
	 */
	ENTRY_NP(boundstrap)
	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
	jmp	cmntrap
	SET_SIZE(boundstrap)

#if defined(__amd64)

	ENTRY_NP(invoptrap)

	XPV_TRAP_POP

	cmpw	$KCS_SEL, 8(%rsp)
	jne	ud_user

	push	$0		/* error code -- zero for #UD */
ud_kernel:
	push	$0xdddd		/* a dummy trap number */
	INTR_PUSH
	movq	REGOFF_RIP(%rsp), %rdi
	movq	REGOFF_RSP(%rsp), %rsi
	movq	REGOFF_RAX(%rsp), %rdx
	pushq	(%rsi)
	movq	%rsp, %rsi
	subq	$8, %rsp
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addq	$16, %rsp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	ud_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	ud_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	ud_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	ud_ret
	jmp	ud_trap

ud_push:
	/*
	 * We must emulate a "pushq %rbp". To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
	INTR_POP
	subq	$16, %rsp	/* make room for %rbp */
	pushq	%rax		/* push temp */
	movq	24(%rsp), %rax	/* load calling RIP */
	addq	$1, %rax	/* increment over trapping instr */
	movq	%rax, 8(%rsp)	/* store calling RIP */
	movq	32(%rsp), %rax	/* load calling CS */
	movq	%rax, 16(%rsp)	/* store calling CS */
	movq	40(%rsp), %rax	/* load calling RFLAGS */
	movq	%rax, 24(%rsp)	/* store calling RFLAGS */
	movq	48(%rsp), %rax	/* load calling RSP */
	subq	$8, %rax	/* make room for %rbp */
	movq	%rax, 32(%rsp)	/* store calling RSP */
	movq	56(%rsp), %rax	/* load calling SS */
	movq	%rax, 40(%rsp)	/* store calling SS */
	movq	32(%rsp), %rax	/* reload calling RSP */
	movq	%rbp, (%rax)	/* store %rbp there */
	popq	%rax		/* pop off temp */
	jmp	tr_iret_kernel	/* return from interrupt */
	/*NOTREACHED*/
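
	/*
	 * Sketch of the net effect of the pushq emulation above (illustrative,
	 * not in the original source):
	 *
	 *	saved RIP  += 1		(step over the trapping byte)
	 *	saved RSP  -= 8		(make room on the interrupted stack)
	 *	*(new RSP)  = %rbp	(the value pushq would have stored)
	 *
	 * after which the iret through tr_iret_kernel resumes the interrupted
	 * code as if the pushq had executed normally.
	 */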

ud_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp". This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
	INTR_POP
	pushq	%rax		/* push temp */
	movq	8(%rsp), %rax	/* load calling RIP */
	addq	$1, %rax	/* increment over trapping instr */
	movq	%rax, 8(%rsp)	/* store calling RIP */
	movq	(%rbp), %rax	/* get new %rbp */
	addq	$8, %rbp	/* adjust new %rsp */
	movq	%rbp, 32(%rsp)	/* store new %rsp */
	movq	%rax, %rbp	/* set new %rbp */
	popq	%rax		/* pop off temp */
	jmp	tr_iret_kernel	/* return from interrupt */
	/*NOTREACHED*/

ud_nop:
	/*
	 * We must emulate a "nop". This is obviously not hard: we need only
	 * advance the %rip by one.
	 */
	INTR_POP
	incq	(%rsp)
	jmp	tr_iret_kernel
	/*NOTREACHED*/

ud_ret:
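	/*
	 * Emulate a "ret" (descriptive comment added for clarity; not in the
	 * original source): fetch the return address from the top of the
	 * interrupted stack, make it the saved RIP, and pop it by adding 8
	 * to the saved RSP.
	 */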
	INTR_POP
	pushq	%rax		/* push temp */
	movq	32(%rsp), %rax	/* load %rsp */
	movq	(%rax), %rax	/* load calling RIP */
	movq	%rax, 8(%rsp)	/* store calling RIP */
	addq	$8, 32(%rsp)	/* adjust new %rsp */
	popq	%rax		/* pop off temp */
	jmp	tr_iret_kernel	/* return from interrupt */
	/*NOTREACHED*/

ud_trap:
	/*
	 * We're going to let the kernel handle this as a normal #UD. If,
	 * however, we came through #BP and are spoofing #UD (in this case,
	 * the stored error value will be non-zero), we need to de-spoof
	 * the trap by incrementing %rip and pushing T_BPTFLT.
	 */
	cmpq	$0, REGOFF_ERR(%rsp)
	je	ud_ud
	incq	REGOFF_RIP(%rsp)
	addq	$REGOFF_RIP, %rsp
	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	cmntrap

ud_ud:
	addq	$REGOFF_RIP, %rsp
ud_user:
	NPTRAP_NOERR(T_ILLINST)
	jmp	cmntrap
	SET_SIZE(invoptrap)

#elif defined(__i386)

	/*
	 * #UD
	 */
	ENTRY_NP(invoptrap)
	/*
	 * If we are taking an invalid opcode trap while in the kernel, this
	 * is likely an FBT probe point.
	 */
	pushl	%gs
	cmpw	$KGS_SEL, (%esp)
	jne	8f

	addl	$4, %esp
	pusha
	pushl	%eax		/* push %eax -- may be return value */
	pushl	%esp		/* push stack pointer */
	addl	$48, (%esp)	/* adjust to incoming args */
	pushl	40(%esp)	/* push calling EIP */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	1f
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	2f
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	3f
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	4f
	jmp	7f
1:
	/*
	 * We must emulate a "pushl %ebp". To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popa
	subl	$4, %esp	/* make room for %ebp */
	pushl	%eax		/* push temp */
	movl	8(%esp), %eax	/* load calling EIP */
	incl	%eax		/* increment over LOCK prefix */
	movl	%eax, 4(%esp)	/* store calling EIP */
	movl	12(%esp), %eax	/* load calling CS */
	movl	%eax, 8(%esp)	/* store calling CS */
	movl	16(%esp), %eax	/* load calling EFLAGS */
	movl	%eax, 12(%esp)	/* store calling EFLAGS */
	movl	%ebp, 16(%esp)	/* push %ebp */
	popl	%eax		/* pop off temp */
	jmp	_emul_done
2:
	/*
	 * We must emulate a "popl %ebp". To do this, we do the opposite of
	 * the above: we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popa
	pushl	%eax		/* push temp */
	movl	16(%esp), %ebp	/* pop %ebp */
	movl	12(%esp), %eax	/* load calling EFLAGS */
	movl	%eax, 16(%esp)	/* store calling EFLAGS */
	movl	8(%esp), %eax	/* load calling CS */
	movl	%eax, 12(%esp)	/* store calling CS */
	movl	4(%esp), %eax	/* load calling EIP */
	incl	%eax		/* increment over LOCK prefix */
	movl	%eax, 8(%esp)	/* store calling EIP */
	popl	%eax		/* pop off temp */
	addl	$4, %esp	/* adjust stack pointer */
	jmp	_emul_done
3:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp". This looks similar to the above, but
	 * requires two temporaries: one for the new base pointer, and one
	 * for the staging register.
	 */
	popa
	pushl	%eax		/* push temp */
	pushl	%ebx		/* push temp */
	movl	%ebp, %ebx	/* set temp to old %ebp */
	movl	(%ebx), %ebp	/* pop %ebp */
	movl	16(%esp), %eax	/* load calling EFLAGS */
	movl	%eax, (%ebx)	/* store calling EFLAGS */
	movl	12(%esp), %eax	/* load calling CS */
	movl	%eax, -4(%ebx)	/* store calling CS */
	movl	8(%esp), %eax	/* load calling EIP */
	incl	%eax		/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)	/* store calling EIP */
	movl	%ebx, -4(%esp)	/* temporarily store new %esp */
	popl	%ebx		/* pop off temp */
	popl	%eax		/* pop off temp */
	movl	-12(%esp), %esp	/* set stack pointer */
	subl	$8, %esp	/* adjust for three pushes, one pop */
	jmp	_emul_done
4:
	/*
	 * We must emulate a "nop". This is obviously not hard: we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)
_emul_done:
	IRET			/* return from interrupt */
7:
	popa
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
8:
	addl	$4, %esp
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
	SET_SIZE(invoptrap)

#endif	/* __i386 */

	/*
	 * #NM
	 */

	ENTRY_NP(ndptrap)
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	SET_CPU_GSBASE
	jmp	cmntrap
	SET_SIZE(ndptrap)

#if defined(__amd64)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	pushq	$T_DBLFLT
	SET_CPU_GSBASE

	/*
	 * We share this handler with kmdb (if kmdb is loaded). As such, we
	 * may have reached this point after encountering a #df in kmdb. If
	 * that happens, we'll still be on kmdb's IDT. We need to switch back
	 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
	 * here from kmdb, kmdb is probably in a very sickly state, and
	 * shouldn't be entered from the panic flow. We'll suppress that
	 * entry by setting nopanicdebug.
	 */
	pushq	%rax
	subq	$DESCTBR_SIZE, %rsp
	sidt	(%rsp)
	movq	%gs:CPU_IDT, %rax
	cmpq	%rax, DTR_BASE(%rsp)
	je	1f

	movq	%rax, DTR_BASE(%rsp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
	lidt	(%rsp)

	movl	$1, nopanicdebug

1:	addq	$DESCTBR_SIZE, %rsp
	popq	%rax
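
	/*
	 * Rough C-level sketch of the IDT check above (illustrative only,
	 * not part of the original source; field names are generic):
	 *
	 *	sidt(&dtr);
	 *	if (dtr.base != CPU->cpu_idt) {
	 *		dtr.base = (uintptr_t)CPU->cpu_idt;
	 *		dtr.limit = NIDT * GATE_DESC_SIZE;
	 *		lidt(&dtr);
	 *		nopanicdebug = 1;
	 *	}
	 */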

	DFTRAP_PUSH

	/*
	 * freeze trap trace.
	 */
#ifdef TRAPTRACE
	leaq	trap_trace_freeze(%rip), %r11
	incl	(%r11)
#endif

	ENABLE_INTR_FLAGS

	movq	%rsp, %rdi	/* &regs */
	xorl	%esi, %esi	/* clear address */
	xorl	%edx, %edx	/* cpuid = 0 */
	call	trap

	SET_SIZE(syserrtrap)

#elif defined(__i386)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	cli				/* disable interrupts */

	/*
	 * We share this handler with kmdb (if kmdb is loaded). As such, we
	 * may have reached this point after encountering a #df in kmdb. If
	 * that happens, we'll still be on kmdb's IDT. We need to switch back
	 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
	 * here from kmdb, kmdb is probably in a very sickly state, and
	 * shouldn't be entered from the panic flow. We'll suppress that
	 * entry by setting nopanicdebug.
	 */
	subl	$DESCTBR_SIZE, %esp
	movl	%gs:CPU_IDT, %eax
	sidt	(%esp)
	cmpl	DTR_BASE(%esp), %eax
	je	1f

	movl	%eax, DTR_BASE(%esp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
	lidt	(%esp)

	movl	$1, nopanicdebug

1:	addl	$DESCTBR_SIZE, %esp

	/*
	 * Check the CPL in the TSS to see what mode
	 * (user or kernel) we took the fault in. At this
	 * point we are running in the context of the double
	 * fault task (dftss) but the CPU's task points to
	 * the previous task (ktss) where the process context
	 * has been saved as the result of the task switch.
	 */
	movl	%gs:CPU_TSS, %eax	/* get the TSS */
	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
	jz	make_frame
	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
	movl	TSS_ESP0(%eax), %esp

	/*
	 * Clear the NT flag to avoid a task switch when the process
	 * finally pops the EFL off the stack via an iret. Clear
	 * the TF flag since that is what the processor does for
	 * a normal exception. Clear the IE flag so that interrupts
	 * remain disabled.
	 */
	movl	TSS_EFL(%eax), %ecx
	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
	pushl	%ecx
	popfl				/* restore the EFL */
	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
	lldt	%cx

	/*
	 * Restore process segment selectors.
	 */
	movw	TSS_DS(%eax), %ds
	movw	TSS_ES(%eax), %es
	movw	TSS_FS(%eax), %fs
	movw	TSS_GS(%eax), %gs

	/*
	 * Restore task segment selectors.
	 */
	movl	$KDS_SEL, TSS_DS(%eax)
	movl	$KDS_SEL, TSS_ES(%eax)
	movl	$KDS_SEL, TSS_SS(%eax)
	movl	$KFS_SEL, TSS_FS(%eax)
	movl	$KGS_SEL, TSS_GS(%eax)

	/*
	 * Clear the TS bit, the busy bits in both task
	 * descriptors, and switch tasks.
	 */
	clts
	leal	gdt0, %ecx
	movl	DFTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, DFTSS_SEL+4(%ecx)
	movl	KTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, KTSS_SEL+4(%ecx)
	movw	$KTSS_SEL, %cx
	ltr	%cx
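
	/*
	 * Note (added for clarity): the 0x200 mask cleared above is the
	 * "busy" bit in the type field (byte 5) of each TSS descriptor;
	 * both descriptors must be marked available again before the ltr,
	 * since loading the task register with a busy TSS raises #GP.
	 */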

	/*
	 * Restore part of the process registers.
	 */
	movl	TSS_EBP(%eax), %ebp
	movl	TSS_ECX(%eax), %ecx
	movl	TSS_ESI(%eax), %esi
	movl	TSS_EDI(%eax), %edi

make_frame:
	/*
	 * Make a trap frame. Leave the error code (0) on
	 * the stack since the first word on a trap stack is
	 * unused anyway.
	 */
	pushl	%ebx			/ fault SS
	pushl	%edx			/ fault ESP
	pushl	TSS_EFL(%eax)		/ fault EFL
	pushl	TSS_CS(%eax)		/ fault CS
	pushl	TSS_EIP(%eax)		/ fault EIP
	pushl	$0			/ error code
	pushl	$T_DBLFLT		/ trap number 8
	movl	TSS_EBX(%eax), %ebx	/ restore EBX
	movl	TSS_EDX(%eax), %edx	/ restore EDX
	movl	TSS_EAX(%eax), %eax	/ restore EAX
	sti				/ enable interrupts
	jmp	cmntrap
	SET_SIZE(syserrtrap)
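
/*
 * Summary sketch of the i386 #DF recovery above (added for clarity; not in
 * the original source). Because the double fault arrives through a task
 * gate, the handler runs on its own TSS while the faulting context sits in
 * the previous (kernel) TSS, so it:
 *
 *	1. switches back to this CPU's IDT and sets nopanicdebug if needed,
 *	2. reads the faulting SS/ESP/EFL/CS/EIP out of the saved TSS,
 *	3. clears NT/TF/IE, reloads the LDT and segment registers,
 *	4. clears the busy bits in both TSS descriptors and does ltr back
 *	   to the kernel TSS, and
 *	5. hand-builds a normal trap frame and joins cmntrap with T_DBLFLT.
 */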

#endif	/* __i386 */

	/*
	 * #TS
	 */
	ENTRY_NP(invtsstrap)
	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(invtsstrap)

	/*
	 * #NP
	 */
	ENTRY_NP(segnptrap)
	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(segnptrap)

	/*
	 * #SS
	 */
	ENTRY_NP(stktrap)
	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(stktrap)

	/*
	 * #GP
	 */
	ENTRY_NP(gptrap)
	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(gptrap)

	/*
	 * #PF
	 */
	ENTRY_NP(pftrap)
	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
	INTR_PUSH

#if defined(__amd64)
	movq	%cr2, %r15
#elif defined(__i386)
	movl	%cr2, %esi
#endif	/* __i386 */

	jmp	cmntrap_pushed
	SET_SIZE(pftrap)
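
/*
 * Note (added for clarity): as with #DB above, the faulting address from
 * %cr2 is handed to the cmntrap_pushed code in %r15 on amd64 and %esi on
 * i386, which presumably passes it along to trap() as the fault address.
 */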

#if !defined(__amd64)

	.globl	idt0_default_r

	/*
	 * #PF pentium bug workaround
	 */
	ENTRY_NP(pentium_pftrap)
	pushl	%eax
	movl	%cr2, %eax
	andl	$MMU_STD_PAGEMASK, %eax

	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */

	je	check_for_user_address
user_mode:
	popl	%eax
	pushl	$T_PGFLT	/* $14 */
	jmp	cmntrap
check_for_user_address:

	/*
	 * Before we assume that we have an unmapped trap on our hands,
	 * check to see if this is a fault from user mode. If it is,
	 * we'll kick back into the page fault handler.
	 */
	movl	4(%esp), %eax	/* error code */
	andl	$PF_ERR_USER, %eax
	jnz	user_mode

	/*
	 * We now know that this is the invalid opcode trap.
	 */
	popl	%eax
	addl	$4, %esp	/* pop error code */
	jmp	invoptrap
	SET_SIZE(pentium_pftrap)
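
/*
 * Note (added for clarity, and somewhat speculative): the workaround above
 * appears to cover the classic Pentium erratum handling in which the page
 * holding the IDT is deliberately left unmapped; a kernel-mode #PF whose
 * %cr2 falls in the page named by idt0_default_r is treated as the invalid
 * opcode trap it really was and rerouted to invoptrap, while user-mode
 * faults fall through to the normal #PF path.
 */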

#endif	/* !__amd64 */

	ENTRY_NP(resvtrap)
	TRAP_NOERR(T_RESVTRAP)	/* (reserved) */
	jmp	cmntrap
	SET_SIZE(resvtrap)

	/*
	 * #MF
	 */
	ENTRY_NP(ndperr)
	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
	jmp	cmninttrap
	SET_SIZE(ndperr)

	/*
	 * #AC
	 */
	ENTRY_NP(achktrap)
	TRAP_ERR(T_ALIGNMENT)	/* $17 */
	jmp	cmntrap
	SET_SIZE(achktrap)

	/*
	 * #MC
	 */
	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */

#if defined(__amd64)

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	SET_CPU_GSBASE

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
	TRACE_STAMP(%rdi)

	movq	%rsp, %rbp

	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp); */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#else

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	movl	%esp, %ecx
	pushl	%ecx		/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
	addl	$4, %esp	/* pop arg0 */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#endif

	/*
	 * #XF
	 */
	ENTRY_NP(xmtrap)
	TRAP_NOERR(T_SIMDFPE)	/* $19 */
	jmp	cmninttrap
	SET_SIZE(xmtrap)

	ENTRY_NP(invaltrap)
	TRAP_NOERR(T_INVALTRAP)	/* very invalid */
	jmp	cmntrap
	SET_SIZE(invaltrap)

	.globl	fasttable

#if defined(__amd64)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	orl	%eax, %eax	/* (zero extend top 32-bits) */
	leaq	fasttable(%rip), %r11
	leaq	(%r11, %rax, CLONGSIZE), %r11
	jmp	*(%r11)
1:
	/*
	 * Fast syscall number was illegal. Make it look
	 * as if the INT failed. Modify %rip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	XPV_TRAP_POP

	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)

	jmp	gptrap
	SET_SIZE(fasttrap)

#elif defined(__i386)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
1:
	/*
	 * Fast syscall number was illegal. Make it look
	 * as if the INT failed. Modify %eip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subl	$2, (%esp)	/* XXX int insn 2-bytes */
	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#endif	/* __i386 */

	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)

#if defined(__amd64)

	/*
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	XPV_TRAP_POP
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	jmp	tr_iret_auto
	/*NOTREACHED*/
	SET_SIZE(fast_null)

#elif defined(__i386)

	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	IRET
	SET_SIZE(fast_null)

#endif	/* __i386 */

/*
 * Interrupts start at 32
 */
#define	MKIVCT(n)		\
	ENTRY_NP(ivct##n)	\
	push	$0;		\
	push	$n - 0x20;	\
	jmp	cmnint;		\
	SET_SIZE(ivct##n)
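
/*
 * For illustration (not in the original source), MKIVCT(32) expands to:
 *
 *	ENTRY_NP(ivct32)
 *	push	$0
 *	push	$32 - 0x20
 *	jmp	cmnint
 *	SET_SIZE(ivct32)
 *
 * i.e. a fake zero error code plus the value $n - 0x20 (so vector 32 pushes
 * 0) before joining the common interrupt path at cmnint.
 */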

MKIVCT(32)
MKIVCT(33)
MKIVCT(34)
MKIVCT(35)
MKIVCT(36)
MKIVCT(37)
MKIVCT(38)
MKIVCT(39)
MKIVCT(40)
MKIVCT(41)
MKIVCT(42)
MKIVCT(43)
MKIVCT(44)
MKIVCT(45)
MKIVCT(46)
MKIVCT(47)
MKIVCT(48)
MKIVCT(49)
MKIVCT(50)
MKIVCT(51)
MKIVCT(52)
MKIVCT(53)
MKIVCT(54)
MKIVCT(55)
MKIVCT(56)
MKIVCT(57)
MKIVCT(58)
MKIVCT(59)
MKIVCT(60)
MKIVCT(61)
MKIVCT(62)
MKIVCT(63)
MKIVCT(64)
MKIVCT(65)
MKIVCT(66)
MKIVCT(67)
MKIVCT(68)
MKIVCT(69)
MKIVCT(70)
MKIVCT(71)
MKIVCT(72)
MKIVCT(73)
MKIVCT(74)
MKIVCT(75)
MKIVCT(76)
MKIVCT(77)
MKIVCT(78)
MKIVCT(79)
MKIVCT(80)
MKIVCT(81)
MKIVCT(82)
MKIVCT(83)
MKIVCT(84)
MKIVCT(85)
MKIVCT(86)
MKIVCT(87)
MKIVCT(88)
MKIVCT(89)
MKIVCT(90)
MKIVCT(91)
MKIVCT(92)
MKIVCT(93)
MKIVCT(94)
MKIVCT(95)
MKIVCT(96)
MKIVCT(97)
MKIVCT(98)
MKIVCT(99)
MKIVCT(100)
MKIVCT(101)
MKIVCT(102)
MKIVCT(103)
MKIVCT(104)
MKIVCT(105)
MKIVCT(106)
MKIVCT(107)
MKIVCT(108)
MKIVCT(109)
MKIVCT(110)
MKIVCT(111)
MKIVCT(112)
MKIVCT(113)
MKIVCT(114)
MKIVCT(115)
MKIVCT(116)
MKIVCT(117)
MKIVCT(118)
MKIVCT(119)
MKIVCT(120)
MKIVCT(121)
MKIVCT(122)
MKIVCT(123)
MKIVCT(124)
MKIVCT(125)
MKIVCT(126)
MKIVCT(127)
MKIVCT(128)
MKIVCT(129)
MKIVCT(130)
MKIVCT(131)
MKIVCT(132)
MKIVCT(133)
MKIVCT(134)
MKIVCT(135)
MKIVCT(136)
MKIVCT(137)
MKIVCT(138)
MKIVCT(139)
MKIVCT(140)
MKIVCT(141)
MKIVCT(142)
MKIVCT(143)
MKIVCT(144)
MKIVCT(145)
MKIVCT(146)
MKIVCT(147)
MKIVCT(148)
MKIVCT(149)
MKIVCT(150)
MKIVCT(151)
MKIVCT(152)
MKIVCT(153)
MKIVCT(154)
MKIVCT(155)
MKIVCT(156)
MKIVCT(157)
MKIVCT(158)
MKIVCT(159)
MKIVCT(160)
MKIVCT(161)
MKIVCT(162)
MKIVCT(163)
MKIVCT(164)
MKIVCT(165)
MKIVCT(166)
MKIVCT(167)
MKIVCT(168)
MKIVCT(169)
MKIVCT(170)
MKIVCT(171)
MKIVCT(172)
MKIVCT(173)
MKIVCT(174)
MKIVCT(175)
MKIVCT(176)
MKIVCT(177)
MKIVCT(178)
MKIVCT(179)
MKIVCT(180)
MKIVCT(181)
MKIVCT(182)
MKIVCT(183)
MKIVCT(184)
MKIVCT(185)
MKIVCT(186)
MKIVCT(187)
MKIVCT(188)
MKIVCT(189)
MKIVCT(190)
MKIVCT(191)
MKIVCT(192)
MKIVCT(193)
MKIVCT(194)
MKIVCT(195)
MKIVCT(196)
MKIVCT(197)
MKIVCT(198)
MKIVCT(199)
MKIVCT(200)
MKIVCT(201)
MKIVCT(202)
MKIVCT(203)
MKIVCT(204)
MKIVCT(205)
MKIVCT(206)
MKIVCT(207)
MKIVCT(208)
MKIVCT(209)
MKIVCT(210)
MKIVCT(211)
MKIVCT(212)
MKIVCT(213)
MKIVCT(214)
MKIVCT(215)
MKIVCT(216)
MKIVCT(217)
MKIVCT(218)
MKIVCT(219)
MKIVCT(220)
MKIVCT(221)
MKIVCT(222)
MKIVCT(223)
MKIVCT(224)
MKIVCT(225)
MKIVCT(226)
MKIVCT(227)
MKIVCT(228)
MKIVCT(229)
MKIVCT(230)
MKIVCT(231)
MKIVCT(232)
MKIVCT(233)
MKIVCT(234)
MKIVCT(235)
MKIVCT(236)
MKIVCT(237)
MKIVCT(238)
MKIVCT(239)
MKIVCT(240)
MKIVCT(241)
MKIVCT(242)
MKIVCT(243)
MKIVCT(244)
MKIVCT(245)
MKIVCT(246)
MKIVCT(247)
MKIVCT(248)
MKIVCT(249)
MKIVCT(250)
MKIVCT(251)
MKIVCT(252)
MKIVCT(253)
MKIVCT(254)
MKIVCT(255)