arch/x86/kernel/ml/exception.s
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
 */
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/dtrace.h>
#include <sys/x86_archext.h>
#include <sys/traptrace.h>
#include <sys/machparam.h>
/*
 * only one routine in this file is interesting to lint
 */
#include "assym.h"
/*
 * push $0 on stack for traps that do not
 * generate an error code. This is so the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 *
 * Note that for all exceptions for amd64
 * %r11 and %rcx are on the stack. Just pop
 * them back into their appropriate registers and let
 * it get saved as if running native.
 */
#if defined(__xpv) && defined(__amd64)

#define	NPTRAP_NOERR(trapno)	\
	pushq	$0;		\
	pushq	$trapno

#define	TRAP_NOERR(trapno)	\
	XPV_TRAP_POP;		\
	NPTRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	XPV_TRAP_POP;		\
	pushq	$trapno

#else	/* __xpv && __amd64 */

#define	TRAP_NOERR(trapno)	\
	push	$0;		\
	push	$trapno

#define	NPTRAP_NOERR(trapno)	TRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	push	$trapno

#endif	/* __xpv && __amd64 */
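
/*
 * Illustrative expansion (non-xpv case, derived from the definitions above):
 * for a no-error-code trap such as #DE, TRAP_NOERR(T_ZERODIV) leaves the
 * stack looking like the frame hardware builds for an error-code trap:
 *
 *	(high address)	r_ss, r_rsp, r_rfl, r_cs, r_rip	<- pushed by hw
 *			$0				<- fake error code
 *	(low address)	$T_ZERODIV			<- trap number
 *
 * so cmntrap can treat every vector uniformly.
 */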
/*
 * These are the stacks used on cpu0 for taking double faults,
 * NMIs and MCEs (the latter two only on amd64 where we have IST).
 *
 * We define them here instead of in a C file so that we can page-align
 * them (gcc won't do that in a .c file).
 */
	.data
	DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
	.fill	DEFAULTSTKSZ, 1, 0
	DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
	.fill	DEFAULTSTKSZ, 1, 0
	DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
	.fill	DEFAULTSTKSZ, 1, 0
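
/*
 * For reference (a sketch based on the usual sys/asm_linkage.h definition):
 * DGDEF3(name, sz, algn) declares a global data symbol "name" with reported
 * size "sz" aligned to "algn" bytes; the .fill directive that follows each
 * one actually reserves DEFAULTSTKSZ zeroed bytes of backing storage.
 */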
/*
 * #DE
 */
	ENTRY_NP(div0trap)
	TRAP_NOERR(T_ZERODIV)	/* $0 */
	jmp	cmntrap
	SET_SIZE(div0trap)
/*
 * #DB
 *
 * Fetch %dr6 and clear it, handing off the value to the
 * cmntrap code in %r15/%esi
 */
	ENTRY_NP(dbgtrap)
	TRAP_NOERR(T_SGLSTP)	/* $1 */

#if defined(__amd64)
#if !defined(__xpv)		/* no sysenter support yet */
	/*
	 * If we get here as a result of single-stepping a sysenter
	 * instruction, we suddenly find ourselves taking a #db
	 * in kernel mode -before- we've swapgs'ed.  So before we can
	 * take the trap, we do the swapgs here, and fix the return
	 * %rip in trap() so that we return immediately after the
	 * swapgs in the sysenter handler to avoid doing the swapgs again.
	 *
	 * Nobody said that the design of sysenter was particularly
	 * elegant, did they?
	 */

	pushq	%r11

	/*
	 * At this point the stack looks like this:
	 *
	 * (high address)	r_ss
	 *			r_rsp
	 *			r_rfl
	 *			r_cs
	 *			r_rip		<-- %rsp + 24
	 *			r_err		<-- %rsp + 16
	 *			r_trapno	<-- %rsp + 8
	 * (low address)	%r11		<-- %rsp
	 */
	leaq	sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
	je	1f
	leaq	brand_sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
	je	1f
	leaq	tr_sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)
	je	1f
	leaq	tr_brand_sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)
	jne	2f
1:	SWAPGS
2:	popq	%r11
#endif	/* !__xpv */

	INTR_PUSH
#if defined(__xpv)
	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, %r15		/* %db6 -> %r15 */
	movl	$6, %edi
	movl	$0, %esi
	call	kdi_dreg_set		/* 0 -> %db6 */
#else
	movq	%db6, %r15
	xorl	%eax, %eax
	movq	%rax, %db6
#endif

#elif defined(__i386)

	INTR_PUSH
#if defined(__xpv)
	pushl	$6
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, %esi		/* %dr6 -> %esi */
	pushl	$0
	pushl	$6
	call	kdi_dreg_set		/* 0 -> %dr6 */
	addl	$8, %esp
#else
	movl	%db6, %esi
	xorl	%eax, %eax
	movl	%eax, %db6
#endif
#endif	/* __i386 */

	jmp	cmntrap_pushed
	SET_SIZE(dbgtrap)
#if defined(__amd64)
#if !defined(__xpv)

/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase else
 * set gsbase.  We find the proper cpu struct by looping through
 * the cpu structs for all processors till we find a match for the gdt
 * of the trapping processor.  The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 *
 * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
 * and kgsbase set to the same value) because we're not going back the normal
 * way out of here (via IRET). Where we're going, we don't need no user %gs.
 */
#define	SET_CPU_GSBASE							\
	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
	movq	%rax, REGOFF_RAX(%rsp);					\
	movq	%rbx, REGOFF_RBX(%rsp);					\
	movq	%rcx, REGOFF_RCX(%rsp);					\
	movq	%rdx, REGOFF_RDX(%rsp);					\
	movq	%rbp, REGOFF_RBP(%rsp);					\
	movq	%rsp, %rbp;						\
	subq	$16, %rsp;		/* space for gdt */		\
	sgdt	6(%rsp);						\
	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
	xorl	%ebx, %ebx;		/* loop index */		\
	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
1:									\
	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
	je	2f;			/* yes, continue */		\
	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
	je	3f;			/* yes, go set gsbase */	\
2:									\
	incl	%ebx;			/* i++ */			\
	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
	jb	1b;			/* yes, loop */			\
/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */ \
3:									\
	movl	$MSR_AMD_KGSBASE, %ecx;					\
	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
	jne	4f;			/* no, go set KGSBASE */	\
	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
	mfence;				/* OPTERON_ERRATUM_88 */	\
4:									\
	movq	%rax, %rdx;		/* write base register */	\
	shrq	$32, %rdx;						\
	wrmsr;								\
	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
	movq	REGOFF_RCX(%rbp), %rcx;					\
	movq	REGOFF_RBX(%rbp), %rbx;					\
	movq	REGOFF_RAX(%rbp), %rax;					\
	movq	%rbp, %rsp;						\
	movq	REGOFF_RBP(%rsp), %rbp;					\
	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */
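
/*
 * Roughly equivalent C for the lookup above (a sketch only; "cpu" is the
 * kernel's array of struct cpu pointers and cpu_gdt its cached GDT base):
 *
 *	sgdt(&gdt);
 *	for (i = 0; i < NCPU; i++) {
 *		if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdt.base) {
 *			if (trapped_from_kernel)
 *				wrmsr(MSR_AMD_GSBASE, (uint64_t)cpu[i]);
 *			else
 *				wrmsr(MSR_AMD_KGSBASE, (uint64_t)cpu[i]);
 *			break;
 *		}
 *	}
 */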

#else	/* __xpv */

#define	SET_CPU_GSBASE	/* noop on the hypervisor */

#endif	/* __xpv */
#endif	/* __amd64 */
#if defined(__amd64)

/*
 * #NMI
 *
 * XXPV: See 6532669.
 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	SET_CPU_GSBASE

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
	TRACE_REGS(%r12, %rsp, %rax, %rbx)
	TRACE_STAMP(%r12)

	movq	%rsp, %rbp

	movq	%rbp, %rdi
	call	av_dispatch_nmivect

	INTR_POP
	jmp	tr_iret_auto
	/*NOTREACHED*/
	SET_SIZE(nmiint)

#elif defined(__i386)

/*
 * #NMI
 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	pushl	%ebp
	call	av_dispatch_nmivect
	addl	$4, %esp

	INTR_POP_USER
	IRET
	SET_SIZE(nmiint)

#endif	/* __i386 */
/*
 * #BP
 */
	ENTRY_NP(brktrap)

#if defined(__amd64)
	XPV_TRAP_POP
	cmpw	$KCS_SEL, 8(%rsp)
	jne	bp_user

	/*
	 * This is a breakpoint in the kernel -- it is very likely that this
	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
	 * we must decrement the trapping %rip to make it appear as a fault.
	 * We then push a non-zero error code to indicate that this is coming
	 * from #BP.
	 */
	decq	(%rsp)
	push	$1			/* error code -- non-zero for #BP */
	jmp	ud_kernel

bp_user:
#endif	/* __amd64 */

	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	dtrace_trap

	SET_SIZE(brktrap)

/*
 * #OF
 */
	ENTRY_NP(ovflotrap)
	TRAP_NOERR(T_OVFLW)	/* $4 */
	jmp	cmntrap
	SET_SIZE(ovflotrap)

/*
 * #BR
 */
	ENTRY_NP(boundstrap)
	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
	jmp	cmntrap
	SET_SIZE(boundstrap)
#if defined(__amd64)

	ENTRY_NP(invoptrap)

	XPV_TRAP_POP

	cmpw	$KCS_SEL, 8(%rsp)
	jne	ud_user

#if defined(__xpv)
	movb	$0, 12(%rsp)		/* clear saved upcall_mask from %cs */
#endif
	push	$0			/* error code -- zero for #UD */
ud_kernel:
	push	$0xdddd			/* a dummy trap number */
	INTR_PUSH
	movq	REGOFF_RIP(%rsp), %rdi
	movq	REGOFF_RSP(%rsp), %rsi
	movq	REGOFF_RAX(%rsp), %rdx
	pushq	(%rsi)
	movq	%rsp, %rsi
	subq	$8, %rsp
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addq	$16, %rsp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	ud_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	ud_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	ud_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	ud_ret
	jmp	ud_trap

ud_push:
	/*
	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
	INTR_POP
	subq	$16, %rsp		/* make room for %rbp */
	pushq	%rax			/* push temp */
	movq	24(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	32(%rsp), %rax		/* load calling CS */
	movq	%rax, 16(%rsp)		/* store calling CS */
	movq	40(%rsp), %rax		/* load calling RFLAGS */
	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
	movq	48(%rsp), %rax		/* load calling RSP */
	subq	$8, %rax		/* make room for %rbp */
	movq	%rax, 32(%rsp)		/* store calling RSP */
	movq	56(%rsp), %rax		/* load calling SS */
	movq	%rax, 40(%rsp)		/* store calling SS */
	movq	32(%rsp), %rax		/* reload calling RSP */
	movq	%rbp, (%rax)		/* store %rbp there */
	popq	%rax			/* pop off temp */
	jmp	tr_iret_kernel		/* return from interrupt */
	/*NOTREACHED*/
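
	/*
	 * For reference, the frame rewriting above (after INTR_POP and the
	 * two adjustments) works on this layout, offsets relative to %rsp:
	 *
	 *	 0	saved %rax (temp)
	 *	 8	(empty)		<- becomes trapping RIP + 1
	 *	16	(empty)		<- becomes CS
	 *	24	trapping RIP	<- becomes RFLAGS
	 *	32	CS		<- becomes interrupted RSP - 8
	 *	40	RFLAGS		<- becomes SS
	 *	48	interrupted RSP
	 *	56	SS
	 *
	 * i.e. the iretq frame is slid down 16 bytes, the interrupted stack
	 * pointer is lowered by 8, and %rbp is stored at that new slot --
	 * exactly what the emulated "pushq %rbp" would have done.
	 */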

ud_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
	INTR_POP
	pushq	%rax			/* push temp */
	movq	8(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	(%rbp), %rax		/* get new %rbp */
	addq	$8, %rbp		/* adjust new %rsp */
	movq	%rbp, 32(%rsp)		/* store new %rsp */
	movq	%rax, %rbp		/* set new %rbp */
	popq	%rax			/* pop off temp */
	jmp	tr_iret_kernel		/* return from interrupt */
	/*NOTREACHED*/

ud_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard: we need only
	 * advance the %rip by one.
	 */
	INTR_POP
	incq	(%rsp)
	jmp	tr_iret_kernel
	/*NOTREACHED*/

ud_ret:
	INTR_POP
	pushq	%rax			/* push temp */
	movq	32(%rsp), %rax		/* load %rsp */
	movq	(%rax), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	addq	$8, 32(%rsp)		/* adjust new %rsp */
	popq	%rax			/* pop off temp */
	jmp	tr_iret_kernel		/* return from interrupt */
	/*NOTREACHED*/

ud_trap:
	/*
	 * We're going to let the kernel handle this as a normal #UD.  If,
	 * however, we came through #BP and are spoofing #UD (in this case,
	 * the stored error value will be non-zero), we need to de-spoof
	 * the trap by incrementing %rip and pushing T_BPTFLT.
	 */
	cmpq	$0, REGOFF_ERR(%rsp)
	je	ud_ud
	incq	REGOFF_RIP(%rsp)
	addq	$REGOFF_RIP, %rsp
	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	cmntrap

ud_ud:
	addq	$REGOFF_RIP, %rsp
ud_user:
	NPTRAP_NOERR(T_ILLINST)
	jmp	cmntrap
	SET_SIZE(invoptrap)
#elif defined(__i386)

/*
 * #UD
 */
	ENTRY_NP(invoptrap)
	/*
	 * If we are taking an invalid opcode trap while in the kernel, this
	 * is likely an FBT probe point.
	 */
	pushl	%gs
	cmpw	$KGS_SEL, (%esp)
	jne	8f

	addl	$4, %esp
#if defined(__xpv)
	movb	$0, 6(%esp)		/* clear saved upcall_mask from %cs */
#endif	/* __xpv */
	pusha
	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	1f
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	2f
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	3f
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	4f
	jmp	7f
1:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popa
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	jmp	_emul_done
2:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above: we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popa
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	jmp	_emul_done
3:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries: one for the new base pointer, and one
	 * for the staging register.
	 */
	popa
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	-12(%esp), %esp		/* set stack pointer */
	subl	$8, %esp		/* adjust for three pushes, one pop */
	jmp	_emul_done
4:
	/*
	 * We must emulate a "nop".  This is obviously not hard: we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)
_emul_done:
	IRET				/* return from interrupt */
7:
	popa
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
8:
	addl	$4, %esp
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
	SET_SIZE(invoptrap)

#endif	/* __i386 */
#if defined(__amd64)

/*
 * #NM
 */
#if defined(__xpv)

	ENTRY_NP(ndptrap)
	/*
	 * (On the hypervisor we must make a hypercall so we might as well
	 * save everything and handle as in a normal trap.)
	 */
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	INTR_PUSH

	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	LOADCPU(%rax)			/* swapgs handled in hypervisor */
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rax), %rbx	/* %rbx = curthread */
	movl	$FPU_EN, %eax
	movq	T_LWP(%rbx), %rbx	/* %rbx = lwp */
	testq	%rbx, %rbx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addq	$LWP_PCB_FPU, %rbx	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%rbx)
	jz	.handle_in_trap		/* must be the first fault */
	CLTS
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rbx
#endif

	movl	FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax	/* for xrstor */
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx	/* for xrstor */

	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	movq	(%rbx), %rbx		/* fpu_regs.kfpu_u.kfpu_XX pointer */
	.globl	_patch_xrstorq_rbx
_patch_xrstorq_rbx:
	fxrstorq (%rbx)
	cmpw	$KCS_SEL, REGOFF_CS(%rsp)
	je	.return_to_kernel

	ASSERT_UPCALL_MASK_IS_SET
	USER_POP
	IRET				/* return to user mode */
	/*NOTREACHED*/

.return_to_kernel:
	INTR_POP
	IRET
	/*NOTREACHED*/

.handle_in_trap:
	INTR_POP
	pushq	$0			/* can not use TRAP_NOERR */
	pushq	$T_NOEXTFLT
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#else	/* __xpv */

	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	pushq	%rax
	pushq	%rbx
	cmpw	$KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
	jne	1f
	LOADCPU(%rax)			/* if yes, don't swapgs */
	jmp	2f
1:
	SWAPGS				/* if from user, need swapgs */
	LOADCPU(%rax)
	SWAPGS
2:
	/*
	 * Xrstor needs to use edx as part of its flag.
	 * NOTE: have to push rdx after "cmpw ...24(%rsp)", otherwise rsp+$24
	 * will not point to CS.
	 */
	pushq	%rdx
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rax), %rbx	/* %rbx = curthread */
	movl	$FPU_EN, %eax
	movq	T_LWP(%rbx), %rbx	/* %rbx = lwp */
	testq	%rbx, %rbx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addq	$LWP_PCB_FPU, %rbx	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%rbx)
	jz	.handle_in_trap		/* must be the first fault */
	clts
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rbx
#endif

	movl	FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax	/* for xrstor */
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx	/* for xrstor */

	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	movq	(%rbx), %rbx		/* fpu_regs.kfpu_u.kfpu_XX pointer */
	.globl	_patch_xrstorq_rbx
_patch_xrstorq_rbx:
	fxrstorq (%rbx)
	popq	%rdx
	popq	%rbx
	popq	%rax
	jmp	tr_iret_auto
	/*NOTREACHED*/

.handle_in_trap:
	popq	%rdx
	popq	%rbx
	popq	%rax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __xpv */
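
/*
 * The fast path above is roughly the following pseudo-C (a sketch only,
 * written in terms of the symbols the assembly references):
 *
 *	if (!fpu_exists)
 *		goto handle_in_trap;
 *	lwp = CPU->cpu_thread->t_lwp;
 *	if (lwp == NULL)
 *		goto handle_in_trap;
 *	if ((lwp->lwp_pcb.pcb_fpu flags & FPU_EN) == 0)
 *		goto handle_in_trap;		-- first fault; trap() sets up FPU
 *	clts();					-- clear CR0.TS
 *	pcb_fpu flags &= ~FPU_VALID;
 *	restore from fpu_regs.kfpu_u		-- fxrstorq, patched to xrstor at boot
 *	return to the interrupted context
 */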

#elif defined(__i386)

	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in fpnoextflt
	 * below; for all other cases, we let the trap code handle it
	 */
	pushl	%eax
	pushl	%ebx
	pushl	%edx			/* for xrstor */
	pushl	%ds
	pushl	%gs
	movl	$KDS_SEL, %ebx
	movw	%bx, %ds
	movl	$KGS_SEL, %eax
	movw	%ax, %gs
	LOADCPU(%eax)
	cmpl	$0, fpu_exists
	je	.handle_in_trap		/* let trap handle no fp case */
	movl	CPU_THREAD(%eax), %ebx	/* %ebx = curthread */
	movl	$FPU_EN, %eax
	movl	T_LWP(%ebx), %ebx	/* %ebx = lwp */
	testl	%ebx, %ebx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addl	$LWP_PCB_FPU, %ebx	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%ebx)
	jz	.handle_in_trap		/* must be the first fault */
	CLTS
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
#if FPU_CTX_FPU_REGS != 0
	addl	$FPU_CTX_FPU_REGS, %ebx
#endif

	movl	FPU_CTX_FPU_XSAVE_MASK(%ebx), %eax	/* for xrstor */
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%ebx), %edx	/* for xrstor */

	/*
	 * the label below is used in trap.c to detect FP faults in kernel
	 * due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	movl	(%ebx), %ebx		/* fpu_regs.kfpu_u.kfpu_XX pointer */
	.globl	_patch_fxrstor_ebx
_patch_fxrstor_ebx:
	.globl	_patch_xrstor_ebx
_patch_xrstor_ebx:
	frstor	(%ebx)			/* may be patched to fxrstor or xrstor */
	popl	%gs
	popl	%ds
	popl	%edx
	popl	%ebx
	popl	%eax
	IRET

.handle_in_trap:
	popl	%gs
	popl	%ds
	popl	%edx
	popl	%ebx
	popl	%eax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __i386 */
#if defined(__amd64)

/*
 * #DF
 */
	ENTRY_NP(syserrtrap)
	pushq	$T_DBLFLT
	SET_CPU_GSBASE

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
	 * may have reached this point after encountering a #df in kmdb.  If
	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
	 * here from kmdb, kmdb is probably in a very sickly state, and
	 * shouldn't be entered from the panic flow.  We'll suppress that
	 * entry by setting nopanicdebug.
	 */
	pushq	%rax
	subq	$DESCTBR_SIZE, %rsp
	sidt	(%rsp)
	movq	%gs:CPU_IDT, %rax
	cmpq	%rax, DTR_BASE(%rsp)
	je	1f

	movq	%rax, DTR_BASE(%rsp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
	lidt	(%rsp)

	movl	$1, nopanicdebug

1:	addq	$DESCTBR_SIZE, %rsp
	popq	%rax

	DFTRAP_PUSH

	/*
	 * freeze trap trace.
	 */
#ifdef TRAPTRACE
	leaq	trap_trace_freeze(%rip), %r11
	incl	(%r11)
#endif

	ENABLE_INTR_FLAGS

	movq	%rsp, %rdi	/* &regs */
	xorl	%esi, %esi	/* clear address */
	xorl	%edx, %edx	/* cpuid = 0 */
	call	trap

	SET_SIZE(syserrtrap)
#elif defined(__i386)

/*
 * #DF
 */
	ENTRY_NP(syserrtrap)
	cli				/* disable interrupts */

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
	 * may have reached this point after encountering a #df in kmdb.  If
	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
	 * here from kmdb, kmdb is probably in a very sickly state, and
	 * shouldn't be entered from the panic flow.  We'll suppress that
	 * entry by setting nopanicdebug.
	 */
	subl	$DESCTBR_SIZE, %esp
	movl	%gs:CPU_IDT, %eax
	sidt	(%esp)
	cmpl	DTR_BASE(%esp), %eax
	je	1f

	movl	%eax, DTR_BASE(%esp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
	lidt	(%esp)

	movl	$1, nopanicdebug

1:	addl	$DESCTBR_SIZE, %esp

	/*
	 * Check the CPL in the TSS to see what mode
	 * (user or kernel) we took the fault in.  At this
	 * point we are running in the context of the double
	 * fault task (dftss) but the CPU's task points to
	 * the previous task (ktss) where the process context
	 * has been saved as the result of the task switch.
	 */
	movl	%gs:CPU_TSS, %eax	/* get the TSS */
	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
	jz	make_frame
	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
	movl	TSS_ESP0(%eax), %esp

	/*
	 * Clear the NT flag to avoid a task switch when the process
	 * finally pops the EFL off the stack via an iret.  Clear
	 * the TF flag since that is what the processor does for
	 * a normal exception.  Clear the IE flag so that interrupts
	 * remain disabled.
	 */
	movl	TSS_EFL(%eax), %ecx
	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
	pushl	%ecx
	popfl				/* restore the EFL */
	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
	lldt	%cx

	/*
	 * Restore process segment selectors.
	 */
	movw	TSS_DS(%eax), %ds
	movw	TSS_ES(%eax), %es
	movw	TSS_FS(%eax), %fs
	movw	TSS_GS(%eax), %gs

	/*
	 * Restore task segment selectors.
	 */
	movl	$KDS_SEL, TSS_DS(%eax)
	movl	$KDS_SEL, TSS_ES(%eax)
	movl	$KDS_SEL, TSS_SS(%eax)
	movl	$KFS_SEL, TSS_FS(%eax)
	movl	$KGS_SEL, TSS_GS(%eax)

	/*
	 * Clear the TS bit, the busy bits in both task
	 * descriptors, and switch tasks.
	 */
	clts
	leal	gdt0, %ecx
	movl	DFTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, DFTSS_SEL+4(%ecx)
	movl	KTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, KTSS_SEL+4(%ecx)
	movw	$KTSS_SEL, %cx
	ltr	%cx

	/*
	 * Restore part of the process registers.
	 */
	movl	TSS_EBP(%eax), %ebp
	movl	TSS_ECX(%eax), %ecx
	movl	TSS_ESI(%eax), %esi
	movl	TSS_EDI(%eax), %edi

make_frame:
	/*
	 * Make a trap frame.  Leave the error code (0) on
	 * the stack since the first word on a trap stack is
	 * unused anyway.
	 */
	pushl	%ebx			/ fault SS
	pushl	%edx			/ fault ESP
	pushl	TSS_EFL(%eax)		/ fault EFL
	pushl	TSS_CS(%eax)		/ fault CS
	pushl	TSS_EIP(%eax)		/ fault EIP
	pushl	$0			/ error code
	pushl	$T_DBLFLT		/ trap number 8
	movl	TSS_EBX(%eax), %ebx	/ restore EBX
	movl	TSS_EDX(%eax), %edx	/ restore EDX
	movl	TSS_EAX(%eax), %eax	/ restore EAX
	sti				/ enable interrupts
	jmp	cmntrap
	SET_SIZE(syserrtrap)

#endif	/* __i386 */
	ENTRY_NP(overrun)
	push	$0
	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
	jmp	cmninttrap
	SET_SIZE(overrun)

/*
 * #TS
 */
	ENTRY_NP(invtsstrap)
	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(invtsstrap)

/*
 * #NP
 */
	ENTRY_NP(segnptrap)
	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(segnptrap)

/*
 * #SS
 */
	ENTRY_NP(stktrap)
	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(stktrap)

/*
 * #GP
 */
	ENTRY_NP(gptrap)
	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(gptrap)

/*
 * #PF
 */
	ENTRY_NP(pftrap)
	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
	INTR_PUSH

#if defined(__amd64)
	movq	%cr2, %r15
#elif defined(__i386)
	movl	%cr2, %esi
#endif	/* __i386 */

	jmp	cmntrap_pushed
	SET_SIZE(pftrap)
#if !defined(__amd64)

	.globl	idt0_default_r

/*
 * #PF pentium bug workaround
 */
	ENTRY_NP(pentium_pftrap)
	pushl	%eax
	movl	%cr2, %eax
	andl	$MMU_STD_PAGEMASK, %eax

	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */

	je	check_for_user_address
user_mode:
	popl	%eax
	pushl	$T_PGFLT	/* $14 */
	jmp	cmntrap
check_for_user_address:
	/*
	 * Before we assume that we have an unmapped trap on our hands,
	 * check to see if this is a fault from user mode.  If it is,
	 * we'll kick back into the page fault handler.
	 */
	movl	4(%esp), %eax	/* error code */
	andl	$PF_ERR_USER, %eax
	jnz	user_mode

	/*
	 * We now know that this is the invalid opcode trap.
	 */
	popl	%eax
	addl	$4, %esp	/* pop error code */
	jmp	invoptrap
	SET_SIZE(pentium_pftrap)

#endif	/* !__amd64 */
	ENTRY_NP(resvtrap)
	TRAP_NOERR(T_RESVTRAP)	/* (reserved) */
	jmp	cmntrap
	SET_SIZE(resvtrap)

/*
 * #MF
 */
	ENTRY_NP(ndperr)
	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
	jmp	cmninttrap
	SET_SIZE(ndperr)

/*
 * #AC
 */
	ENTRY_NP(achktrap)
	TRAP_ERR(T_ALIGNMENT)	/* $17 */
	jmp	cmntrap
	SET_SIZE(achktrap)

/*
 * #MC
 */
	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */

#if defined(__amd64)

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	SET_CPU_GSBASE

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
	TRACE_STAMP(%rdi)

	movq	%rsp, %rbp

	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp); */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#else

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	movl	%esp, %ecx
	pushl	%ecx		/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
	addl	$4, %esp	/* pop arg0 */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#endif

/*
 * #XF
 */
	ENTRY_NP(xmtrap)
	TRAP_NOERR(T_SIMDFPE)	/* $19 */
	jmp	cmninttrap
	SET_SIZE(xmtrap)

	ENTRY_NP(invaltrap)
	TRAP_NOERR(T_INVALTRAP)	/* very invalid */
	jmp	cmntrap
	SET_SIZE(invaltrap)
	.globl	fasttable

#if defined(__amd64)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	orl	%eax, %eax	/* (zero extend top 32-bits) */
	leaq	fasttable(%rip), %r11
	leaq	(%r11, %rax, CLONGSIZE), %r11
	jmp	*(%r11)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %rip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	XPV_TRAP_POP

	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)

#if defined(__xpv)
	pushq	%r11
	pushq	%rcx
#endif
	jmp	gptrap
	SET_SIZE(fasttrap)

#elif defined(__i386)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %eip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subl	$2, (%esp)	/* XXX int insn 2-bytes */
	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#endif	/* __i386 */
	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)

#if defined(__amd64)

	/*
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	XPV_TRAP_POP
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	jmp	tr_iret_auto
	/*NOTREACHED*/
	SET_SIZE(fast_null)

#elif defined(__i386)

	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	IRET
	SET_SIZE(fast_null)

#endif	/* __i386 */
/*
 * Interrupts start at 32
 */
#define	MKIVCT(n)		\
	ENTRY_NP(ivct##n)	\
	push	$0;		\
	push	$n - 0x20;	\
	jmp	cmnint;		\
	SET_SIZE(ivct##n)
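
/*
 * For illustration, MKIVCT(32) expands to:
 *
 *	ENTRY_NP(ivct32)
 *	push	$0		-- fake error code
 *	push	$32 - 0x20	-- vector number relative to 0x20
 *	jmp	cmnint
 *	SET_SIZE(ivct32)
 *
 * so each ivctNN stub hands cmnint a frame with (vector - 32) sitting
 * where a trap number would normally go.
 */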
MKIVCT(32)
MKIVCT(33)
MKIVCT(34)
MKIVCT(35)
MKIVCT(36)
MKIVCT(37)
MKIVCT(38)
MKIVCT(39)
MKIVCT(40)
MKIVCT(41)
MKIVCT(42)
MKIVCT(43)
MKIVCT(44)
MKIVCT(45)
MKIVCT(46)
MKIVCT(47)
MKIVCT(48)
MKIVCT(49)
MKIVCT(50)
MKIVCT(51)
MKIVCT(52)
MKIVCT(53)
MKIVCT(54)
MKIVCT(55)
MKIVCT(56)
MKIVCT(57)
MKIVCT(58)
MKIVCT(59)
MKIVCT(60)
MKIVCT(61)
MKIVCT(62)
MKIVCT(63)
MKIVCT(64)
MKIVCT(65)
MKIVCT(66)
MKIVCT(67)
MKIVCT(68)
MKIVCT(69)
MKIVCT(70)
MKIVCT(71)
MKIVCT(72)
MKIVCT(73)
MKIVCT(74)
MKIVCT(75)
MKIVCT(76)
MKIVCT(77)
MKIVCT(78)
MKIVCT(79)
MKIVCT(80)
MKIVCT(81)
MKIVCT(82)
MKIVCT(83)
MKIVCT(84)
MKIVCT(85)
MKIVCT(86)
MKIVCT(87)
MKIVCT(88)
MKIVCT(89)
MKIVCT(90)
MKIVCT(91)
MKIVCT(92)
MKIVCT(93)
MKIVCT(94)
MKIVCT(95)
MKIVCT(96)
MKIVCT(97)
MKIVCT(98)
MKIVCT(99)
MKIVCT(100)
MKIVCT(101)
MKIVCT(102)
MKIVCT(103)
MKIVCT(104)
MKIVCT(105)
MKIVCT(106)
MKIVCT(107)
MKIVCT(108)
MKIVCT(109)
MKIVCT(110)
MKIVCT(111)
MKIVCT(112)
MKIVCT(113)
MKIVCT(114)
MKIVCT(115)
MKIVCT(116)
MKIVCT(117)
MKIVCT(118)
MKIVCT(119)
MKIVCT(120)
MKIVCT(121)
MKIVCT(122)
MKIVCT(123)
MKIVCT(124)
MKIVCT(125)
MKIVCT(126)
MKIVCT(127)
MKIVCT(128)
MKIVCT(129)
MKIVCT(130)
MKIVCT(131)
MKIVCT(132)
MKIVCT(133)
MKIVCT(134)
MKIVCT(135)
MKIVCT(136)
MKIVCT(137)
MKIVCT(138)
MKIVCT(139)
MKIVCT(140)
MKIVCT(141)
MKIVCT(142)
MKIVCT(143)
MKIVCT(144)
MKIVCT(145)
MKIVCT(146)
MKIVCT(147)
MKIVCT(148)
MKIVCT(149)
MKIVCT(150)
MKIVCT(151)
MKIVCT(152)
MKIVCT(153)
MKIVCT(154)
MKIVCT(155)
MKIVCT(156)
MKIVCT(157)
MKIVCT(158)
MKIVCT(159)
MKIVCT(160)
MKIVCT(161)
MKIVCT(162)
MKIVCT(163)
MKIVCT(164)
MKIVCT(165)
MKIVCT(166)
MKIVCT(167)
MKIVCT(168)
MKIVCT(169)
MKIVCT(170)
MKIVCT(171)
MKIVCT(172)
MKIVCT(173)
MKIVCT(174)
MKIVCT(175)
MKIVCT(176)
MKIVCT(177)
MKIVCT(178)
MKIVCT(179)
MKIVCT(180)
MKIVCT(181)
MKIVCT(182)
MKIVCT(183)
MKIVCT(184)
MKIVCT(185)
MKIVCT(186)
MKIVCT(187)
MKIVCT(188)
MKIVCT(189)
MKIVCT(190)
MKIVCT(191)
MKIVCT(192)
MKIVCT(193)
MKIVCT(194)
MKIVCT(195)
MKIVCT(196)
MKIVCT(197)
MKIVCT(198)
MKIVCT(199)
MKIVCT(200)
MKIVCT(201)
MKIVCT(202)
MKIVCT(203)
MKIVCT(204)
MKIVCT(205)
MKIVCT(206)
MKIVCT(207)
MKIVCT(208)
MKIVCT(209)
MKIVCT(210)
MKIVCT(211)
MKIVCT(212)
MKIVCT(213)
MKIVCT(214)
MKIVCT(215)
MKIVCT(216)
MKIVCT(217)
MKIVCT(218)
MKIVCT(219)
MKIVCT(220)
MKIVCT(221)
MKIVCT(222)
MKIVCT(223)
MKIVCT(224)
MKIVCT(225)
MKIVCT(226)
MKIVCT(227)
MKIVCT(228)
MKIVCT(229)
MKIVCT(230)
MKIVCT(231)
MKIVCT(232)
MKIVCT(233)
MKIVCT(234)
MKIVCT(235)
MKIVCT(236)
MKIVCT(237)
MKIVCT(238)
MKIVCT(239)
MKIVCT(240)
MKIVCT(241)
MKIVCT(242)
MKIVCT(243)
MKIVCT(244)
MKIVCT(245)
MKIVCT(246)
MKIVCT(247)
MKIVCT(248)
MKIVCT(249)
MKIVCT(250)
MKIVCT(251)
MKIVCT(252)
MKIVCT(253)
MKIVCT(254)
MKIVCT(255)