[dragonfly.git] / sys / platform / vkernel / i386 / trap.c
1 /*-
2 * Copyright (C) 1994, David Greenman
3 * Copyright (c) 1990, 1993
4 * The Regents of the University of California. All rights reserved.
6 * This code is derived from software contributed to Berkeley by
7 * the University of Utah, and William Jolitz.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
37 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
38 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
39 * $DragonFly: src/sys/platform/vkernel/i386/trap.c,v 1.35 2008/09/09 04:06:19 dillon Exp $
43 * 386 Trap and System call handling
46 #include "use_isa.h"
47 #include "use_npx.h"
49 #include "opt_ddb.h"
50 #include "opt_ktrace.h"
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/proc.h>
55 #include <sys/pioctl.h>
56 #include <sys/kernel.h>
57 #include <sys/resourcevar.h>
58 #include <sys/signalvar.h>
59 #include <sys/signal2.h>
60 #include <sys/syscall.h>
61 #include <sys/sysctl.h>
62 #include <sys/sysent.h>
63 #include <sys/uio.h>
64 #include <sys/vmmeter.h>
65 #include <sys/malloc.h>
66 #ifdef KTRACE
67 #include <sys/ktrace.h>
68 #endif
69 #include <sys/ktr.h>
70 #include <sys/upcall.h>
71 #include <sys/vkernel.h>
72 #include <sys/sysproto.h>
73 #include <sys/sysunion.h>
74 #include <sys/vmspace.h>
76 #include <vm/vm.h>
77 #include <vm/vm_param.h>
78 #include <sys/lock.h>
79 #include <vm/pmap.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_map.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_extern.h>
85 #include <machine/cpu.h>
86 #include <machine/md_var.h>
87 #include <machine/pcb.h>
88 #include <machine/smp.h>
89 #include <machine/tss.h>
90 #include <machine/globaldata.h>
92 #include <machine/vm86.h>
94 #include <ddb/ddb.h>
95 #include <sys/msgport2.h>
96 #include <sys/thread2.h>
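/*
 * MAKEMPSAFE(have_mplock) acquires the MP lock (Big Giant Lock) the first
 * time it is reached on a code path and records the acquisition in the
 * local have_mplock flag so the lock can be released exactly once on the
 * way out.  On non-SMP builds it expands to nothing.
 */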
98 #ifdef SMP
100 #define MAKEMPSAFE(have_mplock) \
101 if (have_mplock == 0) { \
102 get_mplock(); \
103 have_mplock = 1; \
106 #else
108 #define MAKEMPSAFE(have_mplock)
110 #endif
112 int (*pmath_emulate) (struct trapframe *);
114 extern int trapwrite (unsigned addr);
116 static int trap_pfault (struct trapframe *, int, vm_offset_t);
117 static void trap_fatal (struct trapframe *, int, vm_offset_t);
118 void dblfault_handler (void);
120 #if 0
121 extern inthand_t IDTVEC(syscall);
122 #endif
124 #define MAX_TRAP_MSG 28
125 static char *trap_msg[] = {
126 "", /* 0 unused */
127 "privileged instruction fault", /* 1 T_PRIVINFLT */
128 "", /* 2 unused */
129 "breakpoint instruction fault", /* 3 T_BPTFLT */
130 "", /* 4 unused */
131 "", /* 5 unused */
132 "arithmetic trap", /* 6 T_ARITHTRAP */
133 "system forced exception", /* 7 T_ASTFLT */
134 "", /* 8 unused */
135 "general protection fault", /* 9 T_PROTFLT */
136 "trace trap", /* 10 T_TRCTRAP */
137 "", /* 11 unused */
138 "page fault", /* 12 T_PAGEFLT */
139 "", /* 13 unused */
140 "alignment fault", /* 14 T_ALIGNFLT */
141 "", /* 15 unused */
142 "", /* 16 unused */
143 "", /* 17 unused */
144 "integer divide fault", /* 18 T_DIVIDE */
145 "non-maskable interrupt trap", /* 19 T_NMI */
146 "overflow trap", /* 20 T_OFLOW */
147 "FPU bounds check fault", /* 21 T_BOUND */
148 "FPU device not available", /* 22 T_DNA */
149 "double fault", /* 23 T_DOUBLEFLT */
150 "FPU operand fetch fault", /* 24 T_FPOPFLT */
151 "invalid TSS fault", /* 25 T_TSSFLT */
152 "segment not present fault", /* 26 T_SEGNPFLT */
153 "stack fault", /* 27 T_STKFLT */
154 "machine check trap", /* 28 T_MCHK */
157 #ifdef DDB
158 static int ddb_on_nmi = 1;
159 SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
160 &ddb_on_nmi, 0, "Go to DDB on NMI");
161 #endif
162 static int panic_on_nmi = 1;
163 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
164 &panic_on_nmi, 0, "Panic on NMI");
165 static int fast_release;
166 SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
167 &fast_release, 0, "Passive Release was optimal");
168 static int slow_release;
169 SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
170 &slow_release, 0, "Passive Release was nonoptimal");
171 #ifdef SMP
172 static int syscall_mpsafe = 1;
173 SYSCTL_INT(_kern, OID_AUTO, syscall_mpsafe, CTLFLAG_RW,
174 &syscall_mpsafe, 0, "Allow MPSAFE marked syscalls to run without BGL");
175 TUNABLE_INT("kern.syscall_mpsafe", &syscall_mpsafe);
176 static int trap_mpsafe = 1;
177 SYSCTL_INT(_kern, OID_AUTO, trap_mpsafe, CTLFLAG_RW,
178 &trap_mpsafe, 0, "Allow traps to mostly run without the BGL");
179 TUNABLE_INT("kern.trap_mpsafe", &trap_mpsafe);
180 #endif
182 MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
183 extern int max_sysmsg;
186 * Passive USER->KERNEL transition. This only occurs if we block in the
187 * kernel while still holding our userland priority. We have to fixup our
188 * priority in order to avoid potential deadlocks before we allow the system
189 * to switch us to another thread.
191 static void
192 passive_release(struct thread *td)
194 struct lwp *lp = td->td_lwp;
196 td->td_release = NULL;
197 lwkt_setpri_self(TDPRI_KERN_USER);
198 lp->lwp_proc->p_usched->release_curproc(lp);
202 * userenter() passively intercepts the thread switch function to increase
203 * the thread priority from a user priority to a kernel priority, reducing
204 * syscall and trap overhead for the case where no switch occurs.
207 static __inline void
208 userenter(struct thread *curtd)
210 curtd->td_release = passive_release;
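/*
 * The release hook installed above is undone in userexit().  If the hook
 * actually fired (we blocked while in the kernel), userexit() must
 * re-acquire the current-process designation via acquire_curproc() before
 * the thread returns to user mode.
 */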
214 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
215 * must be completed before we can return to or try to return to userland.
217 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
 218  * bit arithmetic on the delta calculation so the absolute tick values
 219  * are truncated to an integer.
221 static void
222 userret(struct lwp *lp, struct trapframe *frame, int sticks)
224 struct proc *p = lp->lwp_proc;
225 int sig;
228 * Charge system time if profiling. Note: times are in microseconds.
229 * This may do a copyout and block, so do it first even though it
230 * means some system time will be charged as user time.
232 if (p->p_flag & P_PROFIL) {
233 addupc_task(p, frame->tf_eip,
234 (u_int)((int)lp->lwp_thread->td_sticks - sticks));
237 recheck:
239 * If the jungle wants us dead, so be it.
241 if (lp->lwp_flag & LWP_WEXIT) {
242 get_mplock();
243 lwp_exit(0);
244 rel_mplock(); /* NOT REACHED */
248 * Block here if we are in a stopped state.
250 if (p->p_stat == SSTOP) {
251 get_mplock();
252 tstop();
253 rel_mplock();
254 goto recheck;
258 * Post any pending upcalls
260 if (p->p_flag & P_UPCALLPEND) {
261 get_mplock();
262 p->p_flag &= ~P_UPCALLPEND;
263 postupcall(lp);
264 rel_mplock();
265 goto recheck;
269 * Post any pending signals
271 if ((sig = CURSIG(lp)) != 0) {
272 get_mplock();
273 postsig(sig);
274 rel_mplock();
275 goto recheck;
279 * block here if we are swapped out, but still process signals
280 * (such as SIGKILL). proc0 (the swapin scheduler) is already
281 * aware of our situation, we do not have to wake it up.
283 if (p->p_flag & P_SWAPPEDOUT) {
284 get_mplock();
285 p->p_flag |= P_SWAPWAIT;
286 swapin_request();
287 if (p->p_flag & P_SWAPWAIT)
288 tsleep(p, PCATCH, "SWOUT", 0);
289 p->p_flag &= ~P_SWAPWAIT;
290 rel_mplock();
291 goto recheck;
295 * Make sure postsig() handled request to restore old signal mask after
296 * running signal handler.
298 KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
 302  * Cleanup from userenter and any passive release that might have occurred.
303 * We must reclaim the current-process designation before we can return
304 * to usermode. We also handle both LWKT and USER reschedule requests.
306 static __inline void
307 userexit(struct lwp *lp)
309 struct thread *td = lp->lwp_thread;
310 globaldata_t gd = td->td_gd;
313 * Handle stop requests at kernel priority. Any requests queued
314 * after this loop will generate another AST.
316 while (lp->lwp_proc->p_stat == SSTOP) {
317 get_mplock();
318 tstop();
319 rel_mplock();
323 * Reduce our priority in preparation for a return to userland. If
324 * our passive release function was still in place, our priority was
325 * never raised and does not need to be reduced.
327 if (td->td_release == NULL)
328 lwkt_setpri_self(TDPRI_USER_NORM);
329 td->td_release = NULL;
332 * Become the current user scheduled process if we aren't already,
333 * and deal with reschedule requests and other factors.
335 lp->lwp_proc->p_usched->acquire_curproc(lp);
336 /* WARNING: we may have migrated cpu's */
337 /* gd = td->td_gd; */
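/*
 * KTR trace points for kernel entry and exit: traps, system calls and the
 * fork return path.  Each point logs the pid/tid plus the trap number and
 * fault address, syscall number, or error code as appropriate.
 */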
340 #if !defined(KTR_KERNENTRY)
341 #define KTR_KERNENTRY KTR_ALL
342 #endif
343 KTR_INFO_MASTER(kernentry);
344 KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "pid=%d, tid=%d, trapno=%d, eva=%p",
345 sizeof(int) + sizeof(int) + sizeof(int) + sizeof(vm_offset_t));
346 KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "pid=%d, tid=%d",
347 sizeof(int) + sizeof(int));
348 KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "pid=%d, tid=%d, call=%d",
349 sizeof(int) + sizeof(int) + sizeof(int));
350 KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "pid=%d, tid=%d, err=%d",
351 sizeof(int) + sizeof(int) + sizeof(int));
352 KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "pid=%d, tid=%d",
353 sizeof(int) + sizeof(int));
356 * Exception, fault, and trap interface to the kernel.
357 * This common code is called from assembly language IDT gate entry
358 * routines that prepare a suitable stack frame, and restore this
359 * frame after the exception has been processed.
361 * This function is also called from doreti in an interlock to handle ASTs.
362 * For example: hardwareint->INTROUTINE->(set ast)->doreti->trap
364 * NOTE! We have to retrieve the fault address prior to obtaining the
365 * MP lock because get_mplock() may switch out. YYY cr2 really ought
366 * to be retrieved by the assembly code, not here.
 368  * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
369 * if an attempt is made to switch from a fast interrupt or IPI. This is
370 * necessary to properly take fatal kernel traps on SMP machines if
371 * get_mplock() has to block.
374 void
375 user_trap(struct trapframe *frame)
377 struct globaldata *gd = mycpu;
378 struct thread *td = gd->gd_curthread;
379 struct lwp *lp = td->td_lwp;
380 struct proc *p;
381 int sticks = 0;
382 int i = 0, ucode = 0, type, code;
383 #ifdef SMP
384 int have_mplock = 0;
385 #endif
386 #ifdef INVARIANTS
387 int crit_count = td->td_pri & ~TDPRI_MASK;
388 #endif
389 vm_offset_t eva;
391 p = td->td_proc;
394 * This is a bad kludge to avoid changing the various trapframe
 395  * structures.  Because we are running as a virtual kernel,
396 * the original tf_err field will be passed to us shifted 16
397 * over in the tf_trapno field for T_PAGEFLT.
399 if (frame->tf_trapno == T_PAGEFLT)
400 eva = frame->tf_err;
401 else
402 eva = 0;
403 #if 0
404 kprintf("USER_TRAP AT %08x xflags %d trapno %d eva %08x\n",
405 frame->tf_eip, frame->tf_xflags, frame->tf_trapno, eva);
406 #endif
409 * Everything coming from user mode runs through user_trap,
410 * including system calls.
412 if (frame->tf_trapno == T_SYSCALL80) {
413 syscall2(frame);
414 return;
417 KTR_LOG(kernentry_trap, lp->lwp_proc->p_pid, lp->lwp_tid,
418 frame->tf_trapno, eva);
420 #ifdef DDB
421 if (db_active) {
422 eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
423 ++gd->gd_trap_nesting_level;
424 MAKEMPSAFE(have_mplock);
425 trap_fatal(frame, TRUE, eva);
426 --gd->gd_trap_nesting_level;
427 goto out2;
429 #endif
431 ++gd->gd_trap_nesting_level;
432 #ifdef SMP
433 if (trap_mpsafe == 0)
434 MAKEMPSAFE(have_mplock);
435 #endif
437 --gd->gd_trap_nesting_level;
439 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
440 restart:
441 #endif
442 type = frame->tf_trapno;
443 code = frame->tf_err;
445 userenter(td);
447 sticks = (int)td->td_sticks;
448 lp->lwp_md.md_regs = frame;
450 switch (type) {
451 case T_PRIVINFLT: /* privileged instruction fault */
452 ucode = type;
453 i = SIGILL;
454 break;
456 case T_BPTFLT: /* bpt instruction fault */
457 case T_TRCTRAP: /* trace trap */
458 frame->tf_eflags &= ~PSL_T;
459 i = SIGTRAP;
460 break;
462 case T_ARITHTRAP: /* arithmetic trap */
463 ucode = code;
464 i = SIGFPE;
465 break;
467 case T_ASTFLT: /* Allow process switch */
468 mycpu->gd_cnt.v_soft++;
469 if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
470 atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
471 RQF_AST_OWEUPC);
472 addupc_task(p, p->p_prof.pr_addr,
473 p->p_prof.pr_ticks);
475 goto out;
478 * The following two traps can happen in
479 * vm86 mode, and, if so, we want to handle
480 * them specially.
482 case T_PROTFLT: /* general protection fault */
483 case T_STKFLT: /* stack fault */
484 #if 0
485 if (frame->tf_eflags & PSL_VM) {
486 i = vm86_emulate((struct vm86frame *)frame);
487 if (i == 0)
488 goto out;
489 break;
491 #endif
492 /* FALL THROUGH */
494 case T_SEGNPFLT: /* segment not present fault */
495 case T_TSSFLT: /* invalid TSS fault */
496 case T_DOUBLEFLT: /* double fault */
497 default:
498 ucode = code + BUS_SEGM_FAULT ;
499 i = SIGBUS;
500 break;
502 case T_PAGEFLT: /* page fault */
503 MAKEMPSAFE(have_mplock);
504 i = trap_pfault(frame, TRUE, eva);
505 if (i == -1)
506 goto out;
507 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
508 if (i == -2)
509 goto restart;
510 #endif
511 if (i == 0)
512 goto out;
514 ucode = T_PAGEFLT;
515 break;
517 case T_DIVIDE: /* integer divide fault */
518 ucode = FPE_INTDIV;
519 i = SIGFPE;
520 break;
522 #if NISA > 0
523 case T_NMI:
524 MAKEMPSAFE(have_mplock);
525 /* machine/parity/power fail/"kitchen sink" faults */
526 if (isa_nmi(code) == 0) {
527 #ifdef DDB
529 * NMI can be hooked up to a pushbutton
530 * for debugging.
532 if (ddb_on_nmi) {
533 kprintf ("NMI ... going to debugger\n");
534 kdb_trap (type, 0, frame);
536 #endif /* DDB */
537 goto out2;
538 } else if (panic_on_nmi)
539 panic("NMI indicates hardware failure");
540 break;
541 #endif /* NISA > 0 */
543 case T_OFLOW: /* integer overflow fault */
544 ucode = FPE_INTOVF;
545 i = SIGFPE;
546 break;
548 case T_BOUND: /* bounds check fault */
549 ucode = FPE_FLTSUB;
550 i = SIGFPE;
551 break;
553 case T_DNA:
555 * Virtual kernel intercept - pass the DNA exception
556 * to the (emulated) virtual kernel if it asked to handle
557 * it. This occurs when the virtual kernel is holding
558 * onto the FP context for a different emulated
 559  * process than the one currently running.
561 * We must still call npxdna() since we may have
562 * saved FP state that the (emulated) virtual kernel
563 * needs to hand over to a different emulated process.
565 if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
566 (td->td_pcb->pcb_flags & FP_VIRTFP)
568 npxdna(frame);
569 break;
571 #if NNPX > 0
573 * The kernel may have switched out the FP unit's
574 * state, causing the user process to take a fault
575 * when it tries to use the FP unit. Restore the
576 * state here
578 if (npxdna(frame))
579 goto out;
580 #endif
581 if (!pmath_emulate) {
582 i = SIGFPE;
583 ucode = FPE_FPU_NP_TRAP;
584 break;
586 i = (*pmath_emulate)(frame);
587 if (i == 0) {
588 if (!(frame->tf_eflags & PSL_T))
589 goto out2;
590 frame->tf_eflags &= ~PSL_T;
591 i = SIGTRAP;
593 /* else ucode = emulator_only_knows() XXX */
594 break;
596 case T_FPOPFLT: /* FPU operand fetch fault */
597 ucode = T_FPOPFLT;
598 i = SIGILL;
599 break;
601 case T_XMMFLT: /* SIMD floating-point exception */
602 ucode = 0; /* XXX */
603 i = SIGFPE;
604 break;
608 * Virtual kernel intercept - if the fault is directly related to a
609 * VM context managed by a virtual kernel then let the virtual kernel
610 * handle it.
612 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
613 vkernel_trap(lp, frame);
614 goto out;
618 * Translate fault for emulators (e.g. Linux)
620 if (*p->p_sysent->sv_transtrap)
621 i = (*p->p_sysent->sv_transtrap)(i, type);
623 MAKEMPSAFE(have_mplock);
624 trapsignal(lp, i, ucode);
626 #ifdef DEBUG
627 if (type <= MAX_TRAP_MSG) {
628 uprintf("fatal process exception: %s",
629 trap_msg[type]);
630 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
631 uprintf(", fault VA = 0x%lx", (u_long)eva);
632 uprintf("\n");
634 #endif
636 out:
637 #ifdef SMP
638 KASSERT(td->td_mpcount == have_mplock, ("badmpcount trap/end from %p", (void *)frame->tf_eip));
639 #endif
640 userret(lp, frame, sticks);
641 userexit(lp);
642 out2: ;
643 #ifdef SMP
644 if (have_mplock)
645 rel_mplock();
646 #endif
647 KTR_LOG(kernentry_trap_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
648 #ifdef INVARIANTS
649 KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
650 ("syscall: critical section count mismatch! %d/%d",
651 crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
652 #endif
655 void
656 kern_trap(struct trapframe *frame)
658 struct globaldata *gd = mycpu;
659 struct thread *td = gd->gd_curthread;
660 struct lwp *lp;
661 struct proc *p;
662 int i = 0, ucode = 0, type, code;
663 #ifdef SMP
664 int have_mplock = 0;
665 #endif
666 #ifdef INVARIANTS
667 int crit_count = td->td_pri & ~TDPRI_MASK;
668 #endif
669 vm_offset_t eva;
671 lp = td->td_lwp;
672 p = td->td_proc;
674 if (frame->tf_trapno == T_PAGEFLT)
675 eva = frame->tf_err;
676 else
677 eva = 0;
679 #ifdef DDB
680 if (db_active) {
681 ++gd->gd_trap_nesting_level;
682 MAKEMPSAFE(have_mplock);
683 trap_fatal(frame, FALSE, eva);
684 --gd->gd_trap_nesting_level;
685 goto out2;
687 #endif
689 ++gd->gd_trap_nesting_level;
691 #ifdef SMP
692 if (trap_mpsafe == 0)
693 MAKEMPSAFE(have_mplock);
694 #endif
696 --gd->gd_trap_nesting_level;
698 type = frame->tf_trapno;
699 code = frame->tf_err;
701 #if 0
702 kernel_trap:
703 #endif
704 /* kernel trap */
706 switch (type) {
707 case T_PAGEFLT: /* page fault */
708 MAKEMPSAFE(have_mplock);
709 trap_pfault(frame, FALSE, eva);
710 goto out2;
712 case T_DNA:
713 #if NNPX > 0
715 * The kernel may be using npx for copying or other
716 * purposes.
718 panic("kernel NPX should not happen");
719 if (npxdna(frame))
720 goto out2;
721 #endif
722 break;
724 case T_PROTFLT: /* general protection fault */
725 case T_SEGNPFLT: /* segment not present fault */
727 * Invalid segment selectors and out of bounds
728 * %eip's and %esp's can be set up in user mode.
729 * This causes a fault in kernel mode when the
730 * kernel tries to return to user mode. We want
731 * to get this fault so that we can fix the
732 * problem here and not have to check all the
733 * selectors and pointers when the user changes
734 * them.
736 if (mycpu->gd_intr_nesting_level == 0) {
737 if (td->td_pcb->pcb_onfault) {
738 frame->tf_eip =
739 (register_t)td->td_pcb->pcb_onfault;
740 goto out2;
743 break;
745 case T_TSSFLT:
747 * PSL_NT can be set in user mode and isn't cleared
748 * automatically when the kernel is entered. This
749 * causes a TSS fault when the kernel attempts to
750 * `iret' because the TSS link is uninitialized. We
751 * want to get this fault so that we can fix the
752 * problem here and not every time the kernel is
753 * entered.
755 if (frame->tf_eflags & PSL_NT) {
756 frame->tf_eflags &= ~PSL_NT;
757 goto out2;
759 break;
761 case T_TRCTRAP: /* trace trap */
762 #if 0
763 if (frame->tf_eip == (int)IDTVEC(syscall)) {
765 * We've just entered system mode via the
766 * syscall lcall. Continue single stepping
767 * silently until the syscall handler has
768 * saved the flags.
770 goto out2;
772 if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
774 * The syscall handler has now saved the
775 * flags. Stop single stepping it.
777 frame->tf_eflags &= ~PSL_T;
778 goto out2;
780 #endif
781 #if 0
783 * Ignore debug register trace traps due to
784 * accesses in the user's address space, which
785 * can happen under several conditions such as
786 * if a user sets a watchpoint on a buffer and
787 * then passes that buffer to a system call.
788 * We still want to get TRCTRAPS for addresses
789 * in kernel space because that is useful when
790 * debugging the kernel.
792 if (user_dbreg_trap()) {
794 * Reset breakpoint bits because the
795 * processor doesn't
797 load_dr6(rdr6() & 0xfffffff0);
798 goto out2;
800 #endif
802 * Fall through (TRCTRAP kernel mode, kernel address)
804 case T_BPTFLT:
806 * If DDB is enabled, let it handle the debugger trap.
807 * Otherwise, debugger traps "can't happen".
809 #ifdef DDB
810 MAKEMPSAFE(have_mplock);
811 if (kdb_trap (type, 0, frame))
812 goto out2;
813 #endif
814 break;
815 case T_DIVIDE:
816 MAKEMPSAFE(have_mplock);
817 trap_fatal(frame, FALSE, eva);
818 goto out2;
819 case T_NMI:
820 MAKEMPSAFE(have_mplock);
821 trap_fatal(frame, FALSE, eva);
822 goto out2;
823 case T_SYSCALL80:
825 * Ignore this trap generated from a spurious SIGTRAP.
827 * single stepping in / syscalls leads to spurious / SIGTRAP
828 * so ignore
830 * Haiku (c) 2007 Simon 'corecode' Schubert
832 goto out2;
836 * Translate fault for emulators (e.g. Linux)
838 if (*p->p_sysent->sv_transtrap)
839 i = (*p->p_sysent->sv_transtrap)(i, type);
841 MAKEMPSAFE(have_mplock);
842 trapsignal(lp, i, ucode);
844 #ifdef DEBUG
845 if (type <= MAX_TRAP_MSG) {
846 uprintf("fatal process exception: %s",
847 trap_msg[type]);
848 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
849 uprintf(", fault VA = 0x%lx", (u_long)eva);
850 uprintf("\n");
852 #endif
854 out2:
856 #ifdef SMP
857 if (have_mplock)
858 rel_mplock();
859 #endif
860 #ifdef INVARIANTS
861 KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
862 ("syscall: critical section count mismatch! %d/%d",
863 crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
864 #endif
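/*
 * trap_pfault() resolves a page fault.  Kernel-mode faults are run against
 * the kernel_map; user-mode faults are run against the faulting lwp's
 * vmspace (and are fatal if there is none).  Returns 0 if the fault was
 * resolved or recovered through pcb_onfault, -1 if a kernel-mode fault was
 * fatal (reported via trap_fatal()), or a signal number (SIGSEGV/SIGBUS)
 * to post to the process.
 */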
static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
870 vm_offset_t va;
871 struct vmspace *vm = NULL;
872 vm_map_t map = 0;
873 int rv = 0;
874 vm_prot_t ftype;
875 thread_t td = curthread;
876 struct lwp *lp = td->td_lwp;
878 va = trunc_page(eva);
879 if (usermode == FALSE) {
881 * This is a fault on kernel virtual memory.
883 map = &kernel_map;
884 } else {
886 * This is a fault on non-kernel virtual memory.
887 * vm is initialized above to NULL. If curproc is NULL
888 * or curproc->p_vmspace is NULL the fault is fatal.
890 if (lp != NULL)
891 vm = lp->lwp_vmspace;
893 if (vm == NULL)
894 goto nogo;
896 map = &vm->vm_map;
899 if (frame->tf_xflags & PGEX_W)
900 ftype = VM_PROT_READ | VM_PROT_WRITE;
901 else
902 ftype = VM_PROT_READ;
904 if (map != &kernel_map) {
906 * Keep swapout from messing with us during this
907 * critical time.
909 PHOLD(lp->lwp_proc);
912 * Grow the stack if necessary
914 /* grow_stack returns false only if va falls into
915 * a growable stack region and the stack growth
916 * fails. It returns true if va was not within
917 * a growable stack region, or if the stack
918 * growth succeeded.
920 if (!grow_stack (lp->lwp_proc, va)) {
921 rv = KERN_FAILURE;
922 PRELE(lp->lwp_proc);
923 goto nogo;
926 /* Fault in the user page: */
927 rv = vm_fault(map, va, ftype,
928 (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
929 : VM_FAULT_NORMAL);
931 PRELE(lp->lwp_proc);
932 } else {
934 * Don't have to worry about process locking or stacks in the kernel.
936 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
939 if (rv == KERN_SUCCESS)
940 return (0);
941 nogo:
942 if (!usermode) {
943 if (td->td_gd->gd_intr_nesting_level == 0 &&
944 td->td_pcb->pcb_onfault) {
945 frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
946 return (0);
948 trap_fatal(frame, usermode, eva);
949 return (-1);
951 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
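/*
 * trap_fatal() reports an unrecoverable trap: it prints the trap type,
 * fault address, register state and current process/thread to the console,
 * gives DDB a chance to take over when it is compiled in, and otherwise
 * panics.
 */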
954 static void
955 trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
957 int code, type, ss, esp;
959 code = frame->tf_xflags;
960 type = frame->tf_trapno;
962 if (type <= MAX_TRAP_MSG) {
963 kprintf("\n\nFatal trap %d: %s while in %s mode\n",
964 type, trap_msg[type],
965 (usermode ? "user" : "kernel"));
967 #ifdef SMP
968 /* two separate prints in case of a trap on an unmapped page */
969 kprintf("mp_lock = %08x; ", mp_lock);
970 kprintf("cpuid = %d\n", mycpu->gd_cpuid);
971 #endif
972 if (type == T_PAGEFLT) {
973 kprintf("fault virtual address = 0x%x\n", eva);
974 kprintf("fault code = %s %s, %s\n",
975 usermode ? "user" : "supervisor",
976 code & PGEX_W ? "write" : "read",
977 code & PGEX_P ? "protection violation" : "page not present");
979 kprintf("instruction pointer = 0x%x:0x%x\n",
980 frame->tf_cs & 0xffff, frame->tf_eip);
981 if (usermode) {
982 ss = frame->tf_ss & 0xffff;
983 esp = frame->tf_esp;
984 } else {
985 ss = GSEL(GDATA_SEL, SEL_KPL);
986 esp = (int)&frame->tf_esp;
988 kprintf("stack pointer = 0x%x:0x%x\n", ss, esp);
989 kprintf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
990 kprintf("processor eflags = ");
991 if (frame->tf_eflags & PSL_T)
992 kprintf("trace trap, ");
993 if (frame->tf_eflags & PSL_I)
994 kprintf("interrupt enabled, ");
995 if (frame->tf_eflags & PSL_NT)
996 kprintf("nested task, ");
997 if (frame->tf_eflags & PSL_RF)
998 kprintf("resume, ");
999 #if 0
1000 if (frame->tf_eflags & PSL_VM)
1001 kprintf("vm86, ");
1002 #endif
1003 kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
1004 kprintf("current process = ");
1005 if (curproc) {
1006 kprintf("%lu (%s)\n",
1007 (u_long)curproc->p_pid, curproc->p_comm ?
1008 curproc->p_comm : "");
1009 } else {
1010 kprintf("Idle\n");
1012 kprintf("current thread = pri %d ", curthread->td_pri);
1013 if (curthread->td_pri >= TDPRI_CRIT)
1014 kprintf("(CRIT)");
1015 kprintf("\n");
1016 #ifdef SMP
1018 * XXX FIXME:
1019 * we probably SHOULD have stopped the other CPUs before now!
1020 * another CPU COULD have been touching cpl at this moment...
1022 kprintf(" <- SMP: XXX");
1023 #endif
1024 kprintf("\n");
1026 #ifdef KDB
1027 if (kdb_trap(&psl))
1028 return;
1029 #endif
1030 #ifdef DDB
1031 if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
1032 return;
1033 #endif
1034 kprintf("trap number = %d\n", type);
1035 if (type <= MAX_TRAP_MSG)
1036 panic("%s", trap_msg[type]);
1037 else
1038 panic("unknown/reserved trap");
1042 * Double fault handler. Called when a fault occurs while writing
1043 * a frame for a trap/exception onto the stack. This usually occurs
1044 * when the stack overflows (such is the case with infinite recursion,
1045 * for example).
1047 * XXX Note that the current PTD gets replaced by IdlePTD when the
1048 * task switch occurs. This means that the stack that was active at
1049 * the time of the double fault is not available at <kstack> unless
1050 * the machine was idle when the double fault occurred. The downside
1051 * of this is that "trace <ebp>" in ddb won't work.
1053 void
1054 dblfault_handler(void)
1056 struct mdglobaldata *gd = mdcpu;
1058 kprintf("\nFatal double fault:\n");
1059 kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
1060 kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
1061 kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
1062 #ifdef SMP
1063 /* two separate prints in case of a trap on an unmapped page */
1064 kprintf("mp_lock = %08x; ", mp_lock);
1065 kprintf("cpuid = %d\n", mycpu->gd_cpuid);
1066 #endif
1067 panic("double fault");
1071 * Compensate for 386 brain damage (missing URKR).
1072 * This is a little simpler than the pagefault handler in trap() because
1073  * the page tables have already been faulted in and high addresses
1074 * are thrown out early for other reasons.
int
trapwrite(unsigned addr)
1079 struct lwp *lp;
1080 vm_offset_t va;
1081 struct vmspace *vm;
1082 int rv;
1084 va = trunc_page((vm_offset_t)addr);
1086 * XXX - MAX is END. Changed > to >= for temp. fix.
1088 if (va >= VM_MAX_USER_ADDRESS)
1089 return (1);
1091 lp = curthread->td_lwp;
1092 vm = lp->lwp_vmspace;
1094 PHOLD(lp->lwp_proc);
1096 if (!grow_stack (lp->lwp_proc, va)) {
1097 PRELE(lp->lwp_proc);
1098 return (1);
1102 * fault the data page
1104 rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);
1106 PRELE(lp->lwp_proc);
1108 if (rv != KERN_SUCCESS)
1109 return 1;
1111 return (0);
1115 * syscall2 - MP aware system call request C handler
1117 * A system call is essentially treated as a trap except that the
1118 * MP lock is not held on entry or return. We are responsible for
1119 * obtaining the MP lock if necessary and for handling ASTs
1120 * (e.g. a task switch) prior to return.
1122 * In general, only simple access and manipulation of curproc and
1123 * the current stack is allowed without having to hold MP lock.
1125 * MPSAFE - note that large sections of this routine are run without
1126 * the MP lock.
1129 void
1130 syscall2(struct trapframe *frame)
1132 struct thread *td = curthread;
1133 struct proc *p = td->td_proc;
1134 struct lwp *lp = td->td_lwp;
1135 caddr_t params;
1136 struct sysent *callp;
1137 register_t orig_tf_eflags;
1138 int sticks;
1139 int error;
1140 int narg;
1141 #ifdef INVARIANTS
1142 int crit_count = td->td_pri & ~TDPRI_MASK;
1143 #endif
1144 #ifdef SMP
1145 int have_mplock = 0;
1146 #endif
1147 u_int code;
1148 union sysunion args;
1150 KTR_LOG(kernentry_syscall, lp->lwp_proc->p_pid, lp->lwp_tid,
1151 frame->tf_eax);
1153 #ifdef SMP
1154 KASSERT(td->td_mpcount == 0, ("badmpcount syscall2 from %p", (void *)frame->tf_eip));
1155 if (syscall_mpsafe == 0)
1156 MAKEMPSAFE(have_mplock);
1157 #endif
1158 userenter(td); /* lazy raise our priority */
1161 * Misc
1163 sticks = (int)td->td_sticks;
1164 orig_tf_eflags = frame->tf_eflags;
1167 * Virtual kernel intercept - if a VM context managed by a virtual
1168 * kernel issues a system call the virtual kernel handles it, not us.
1169 * Restore the virtual kernel context and return from its system
1170 * call. The current frame is copied out to the virtual kernel.
1172 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
1173 error = vkernel_trap(lp, frame);
1174 frame->tf_eax = error;
1175 if (error)
1176 frame->tf_eflags |= PSL_C;
1177 error = EJUSTRETURN;
1178 goto out;
1182 * Get the system call parameters and account for time
1184 lp->lwp_md.md_regs = frame;
1185 params = (caddr_t)frame->tf_esp + sizeof(int);
1186 code = frame->tf_eax;
1188 if (p->p_sysent->sv_prepsyscall) {
1189 (*p->p_sysent->sv_prepsyscall)(
1190 frame, (int *)(&args.nosys.sysmsg + 1),
1191 &code, &params);
1192 } else {
1194 * Need to check if this is a 32 bit or 64 bit syscall.
1195 * fuword is MP aware.
1197 if (code == SYS_syscall) {
1199 * Code is first argument, followed by actual args.
1201 code = fuword(params);
1202 params += sizeof(int);
1203 } else if (code == SYS___syscall) {
1205 * Like syscall, but code is a quad, so as to maintain
1206 * quad alignment for the rest of the arguments.
1208 code = fuword(params);
1209 params += sizeof(quad_t);
1213 code &= p->p_sysent->sv_mask;
1214 if (code >= p->p_sysent->sv_size)
1215 callp = &p->p_sysent->sv_table[0];
1216 else
1217 callp = &p->p_sysent->sv_table[code];
1219 narg = callp->sy_narg & SYF_ARGMASK;
1222 * copyin is MP aware, but the tracing code is not
1224 if (narg && params) {
1225 error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
1226 narg * sizeof(register_t));
1227 if (error) {
1228 #ifdef KTRACE
1229 if (KTRPOINT(td, KTR_SYSCALL)) {
1230 MAKEMPSAFE(have_mplock);
1232 ktrsyscall(lp, code, narg,
1233 (void *)(&args.nosys.sysmsg + 1));
1235 #endif
1236 goto bad;
1240 #ifdef KTRACE
1241 if (KTRPOINT(td, KTR_SYSCALL)) {
1242 MAKEMPSAFE(have_mplock);
1243 ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
1245 #endif
1248 * For traditional syscall code edx is left untouched when 32 bit
1249 * results are returned. Since edx is loaded from fds[1] when the
1250 * system call returns we pre-set it here.
1252 args.sysmsg_fds[0] = 0;
1253 args.sysmsg_fds[1] = frame->tf_edx;
1256 * The syscall might manipulate the trap frame. If it does it
1257 * will probably return EJUSTRETURN.
1259 args.sysmsg_frame = frame;
1261 STOPEVENT(p, S_SCE, narg); /* MP aware */
1263 #ifdef SMP
1265 * Try to run the syscall without the MP lock if the syscall
1266 * is MP safe. We have to obtain the MP lock no matter what if
1267 * we are ktracing
1269 if ((callp->sy_narg & SYF_MPSAFE) == 0)
1270 MAKEMPSAFE(have_mplock);
1271 #endif
1273 error = (*callp->sy_call)(&args);
1275 #if 0
1276 kprintf("system call %d returned %d\n", code, error);
1277 #endif
1279 out:
1281 * MP SAFE (we may or may not have the MP lock at this point)
1283 switch (error) {
1284 case 0:
1286 * Reinitialize proc pointer `p' as it may be different
1287 * if this is a child returning from fork syscall.
1289 p = curproc;
1290 lp = curthread->td_lwp;
1291 frame->tf_eax = args.sysmsg_fds[0];
1292 frame->tf_edx = args.sysmsg_fds[1];
1293 frame->tf_eflags &= ~PSL_C;
1294 break;
1295 case ERESTART:
1297 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
1298 * int 0x80 is 2 bytes. We saved this in tf_err.
1300 frame->tf_eip -= frame->tf_err;
1301 break;
1302 case EJUSTRETURN:
1303 break;
1304 case EASYNC:
1305 panic("Unexpected EASYNC return value (for now)");
1306 default:
1307 bad:
1308 if (p->p_sysent->sv_errsize) {
1309 if (error >= p->p_sysent->sv_errsize)
1310 error = -1; /* XXX */
1311 else
1312 error = p->p_sysent->sv_errtbl[error];
1314 frame->tf_eax = error;
1315 frame->tf_eflags |= PSL_C;
1316 break;
1320 * Traced syscall. trapsignal() is not MP aware.
1322 if ((orig_tf_eflags & PSL_T) /*&& !(orig_tf_eflags & PSL_VM)*/) {
1323 MAKEMPSAFE(have_mplock);
1324 frame->tf_eflags &= ~PSL_T;
1325 trapsignal(lp, SIGTRAP, 0);
1329 * Handle reschedule and other end-of-syscall issues
1331 userret(lp, frame, sticks);
1333 #ifdef KTRACE
1334 if (KTRPOINT(td, KTR_SYSRET)) {
1335 MAKEMPSAFE(have_mplock);
1336 ktrsysret(lp, code, error, args.sysmsg_result);
1338 #endif
1341 * This works because errno is findable through the
1342 * register set. If we ever support an emulation where this
1343 * is not the case, this code will need to be revisited.
1345 STOPEVENT(p, S_SCX, code);
1347 userexit(lp);
1348 #ifdef SMP
1350 * Release the MP lock if we had to get it
1352 KASSERT(td->td_mpcount == have_mplock,
1353 ("badmpcount syscall2/end from %p", (void *)frame->tf_eip));
1354 if (have_mplock)
1355 rel_mplock();
1356 #endif
1357 KTR_LOG(kernentry_syscall_ret, lp->lwp_proc->p_pid, lp->lwp_tid, error);
1358 #ifdef INVARIANTS
1359 KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
1360 ("syscall: critical section count mismatch! %d/%d",
1361 crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
1362 #endif
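/*
 * fork_return() is the userland return path of a newly forked process.
 * The child is given a return value of 0 in %eax with the carry flag
 * cleared (success) and %edx set to 1, then the return is completed via
 * generic_lwp_return().
 */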
1365 void
1366 fork_return(struct lwp *lp, struct trapframe *frame)
1368 frame->tf_eax = 0; /* Child returns zero */
1369 frame->tf_eflags &= ~PSL_C; /* success */
1370 frame->tf_edx = 1;
1372 generic_lwp_return(lp, frame);
1373 KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
1377 * Simplified back end of syscall(), used when returning from fork()
1378 * or lwp_create() directly into user mode. MP lock is held on entry and
1379 * should be released on return. This code will return back into the fork
1380 * trampoline code which then runs doreti.
1382 void
1383 generic_lwp_return(struct lwp *lp, struct trapframe *frame)
1385 struct proc *p = lp->lwp_proc;
1388 * Newly forked processes are given a kernel priority. We have to
1389 * adjust the priority to a normal user priority and fake entry
1390 * into the kernel (call userenter()) to install a passive release
1391 * function just in case userret() decides to stop the process. This
1392 * can occur when ^Z races a fork. If we do not install the passive
1393 * release function the current process designation will not be
1394 * released when the thread goes to sleep.
1396 lwkt_setpri_self(TDPRI_USER_NORM);
1397 userenter(lp->lwp_thread);
1398 userret(lp, frame, 0);
1399 #ifdef KTRACE
1400 if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
1401 ktrsysret(lp, SYS_fork, 0, 0);
1402 #endif
1403 p->p_flag |= P_PASSIVE_ACQ;
1404 userexit(lp);
1405 p->p_flag &= ~P_PASSIVE_ACQ;
1406 #ifdef SMP
1407 KKASSERT(lp->lwp_thread->td_mpcount == 1);
1408 rel_mplock();
1409 #endif
1413 * doreti has turned into this. The frame is directly on the stack. We
1414 * pull everything else we need (fpu and tls context) from the current
1415 * thread.
1417 * Note on fpu interactions: In a virtual kernel, the fpu context for
1418 * an emulated user mode process is not shared with the virtual kernel's
1419 * fpu context, so we only have to 'stack' fpu contexts within the virtual
1420 * kernel itself, and not even then since the signal() contexts that we care
1421 * about save and restore the FPU state (I think anyhow).
1423  * vmspace_ctl() returns an error only if it had problems installing the
1424 * context we supplied or problems copying data to/from our VM space.
1426 void
1427 go_user(struct intrframe *frame)
1429 struct trapframe *tf = (void *)&frame->if_gs;
1430 int r;
1433 * Interrupts may be disabled on entry, make sure all signals
1434 * can be received before beginning our loop.
1436 sigsetmask(0);
1439 * Switch to the current simulated user process, then call
1440 * user_trap() when we break out of it (usually due to a signal).
1442 for (;;) {
1444 * Tell the real kernel whether it is ok to use the FP
1445 * unit or not.
1447 if (mdcpu->gd_npxthread == curthread) {
1448 tf->tf_xflags &= ~PGEX_FPFAULT;
1449 } else {
1450 tf->tf_xflags |= PGEX_FPFAULT;
1454 * Run emulated user process context. This call interlocks
1455 * with new mailbox signals.
1457 * Set PGEX_U unconditionally, indicating a user frame (the
1458 * bit is normally set only by T_PAGEFLT).
1460 r = vmspace_ctl(&curproc->p_vmspace->vm_pmap, VMSPACE_CTL_RUN,
1461 tf, &curthread->td_savevext);
1462 frame->if_xflags |= PGEX_U;
1463 #if 0
1464 kprintf("GO USER %d trap %d EVA %08x EIP %08x ESP %08x XFLAGS %02x/%02x\n",
1465 r, tf->tf_trapno, tf->tf_err, tf->tf_eip, tf->tf_esp,
1466 tf->tf_xflags, frame->if_xflags);
1467 #endif
1468 if (r < 0) {
1469 if (errno != EINTR)
1470 panic("vmspace_ctl failed");
1471 } else {
1472 if (tf->tf_trapno) {
1473 user_trap(tf);
1474 } else if (mycpu->gd_reqflags & RQF_AST_MASK) {
1475 tf->tf_trapno = T_ASTFLT;
1476 user_trap(tf);
1478 tf->tf_trapno = 0;
1484 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
1485 * fault (which is then passed back to the virtual kernel) if an attempt is
1486 * made to use the FP unit.
1488 * XXX this is a fairly big hack.
1490 void
1491 set_vkernel_fp(struct trapframe *frame)
1493 struct thread *td = curthread;
1495 if (frame->tf_xflags & PGEX_FPFAULT) {
1496 td->td_pcb->pcb_flags |= FP_VIRTFP;
1497 if (mdcpu->gd_npxthread == td)
1498 npxexit();
1499 } else {
1500 td->td_pcb->pcb_flags &= ~FP_VIRTFP;