/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/vkernel/i386/trap.c,v 1.9 2007/01/11 11:15:17 dillon Exp $
 */

/*
 * 386 Trap and System call handling
 */
#include "use_isa.h"
#include "use_npx.h"

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/upcall.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/vmspace.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <machine/vm86.h>

#include <ddb/ddb.h>
#include <sys/msgport2.h>
#include <sys/thread2.h>
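
/*
 * MAKEMPSAFE() acquires the MP lock (the Big Giant Lock) on first use and
 * records the fact in the caller's have_mplock variable, so the lock is
 * obtained at most once per trap or syscall and can be released on the
 * way out.  On non-SMP builds it expands to nothing.
 */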
#ifdef SMP

#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}

#else

#define MAKEMPSAFE(have_mplock)

#endif

int (*pmath_emulate) (struct trapframe *);

extern int trapwrite (unsigned addr);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, int, vm_offset_t);
void dblfault_handler (void);

#if 0
extern inthand_t IDTVEC(syscall);
#endif

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");
#ifdef SMP
static int syscall_mpsafe = 0;
SYSCTL_INT(_kern, OID_AUTO, syscall_mpsafe, CTLFLAG_RW,
	&syscall_mpsafe, 0, "Allow MPSAFE marked syscalls to run without BGL");
TUNABLE_INT("kern.syscall_mpsafe", &syscall_mpsafe);
static int trap_mpsafe = 0;
SYSCTL_INT(_kern, OID_AUTO, trap_mpsafe, CTLFLAG_RW,
	&trap_mpsafe, 0, "Allow traps to mostly run without the BGL");
TUNABLE_INT("kern.trap_mpsafe", &trap_mpsafe);
#endif

MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;

/*
 * Passive USER->KERNEL transition.  This only occurs if we block in the
 * kernel while still holding our userland priority.  We have to fixup our
 * priority in order to avoid potential deadlocks before we allow the system
 * to switch us to another thread.
 */
static void
passive_release(struct thread *td)
{
	struct lwp *lp = td->td_lwp;

	td->td_release = NULL;
	lwkt_setpri_self(TDPRI_KERN_USER);
	lp->lwp_proc->p_usched->release_curproc(lp);
}

/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */
static __inline void
userenter(struct thread *curtd)
{
	curtd->td_release = passive_release;
}

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_flag & P_STOPPED) {
		get_mplock();
		tstop(p);
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending upcalls
	 */
	if (p->p_flag & P_UPCALLPEND) {
		get_mplock();
		p->p_flag &= ~P_UPCALLPEND;
		postupcall(lp);
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending signals
	 */
	if ((sig = CURSIG(p)) != 0) {
		get_mplock();
		postsig(sig);
		rel_mplock();
		goto recheck;
	}

	/*
	 * Block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		get_mplock();
		p->p_flag |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;
		rel_mplock();
		goto recheck;
	}
}

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	globaldata_t gd = td->td_gd;

#if 0
	/*
	 * If a user reschedule is requested force a new process to be
	 * chosen by releasing the current process.  Our process will only
	 * be chosen again if it has a considerably better priority.
	 */
	if (user_resched_wanted())
		lp->lwp_proc->p_usched->release_curproc(lp);
#endif

	/*
	 * Handle a LWKT reschedule request first.  Since our passive release
	 * is still in place we do not have to do anything special.
	 */
	if (lwkt_resched_wanted())
		lwkt_switch();

	/*
	 * Acquire the current process designation for this user scheduler
	 * on this cpu.  This will also handle any user-reschedule requests.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* We may have switched cpus on acquisition */
	gd = td->td_gd;

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;

	/*
	 * After reducing our priority there might be other kernel-level
	 * LWKTs that now have a greater priority.  Run them as necessary.
	 * We don't have to worry about losing cpu to userland because
	 * we still control the current-process designation and we no longer
	 * have a passive release function installed.
	 */
	if (lwkt_checkpri_self())
		lwkt_switch();
}

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
user_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_pri & ~TDPRI_MASK;
#endif
	vm_offset_t eva;

	p = td->td_proc;

	/*
	 * This is a bad kludge to avoid changing the various trapframe
	 * structures.  Because we are enabled as a virtual kernel,
	 * the original tf_err field will be passed to us shifted 16
	 * over in the tf_trapno field for T_PAGEFLT.
	 */
	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_err;
	else
		eva = 0;
#if 0
	kprintf("USER_TRAP AT %08x xflags %d trapno %d eva %08x\n",
		frame->tf_eip, frame->tf_xflags, frame->tf_trapno, eva);
#endif

	/*
	 * Everything coming from user mode runs through user_trap,
	 * including system calls.
	 */
	if (frame->tf_trapno == T_SYSCALL80) {
		syscall2(frame);
		return;
	}

#ifdef DDB
	if (db_active) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, TRUE, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	++gd->gd_trap_nesting_level;
#ifdef SMP
	if (trap_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
#endif
	--gd->gd_trap_nesting_level;

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame->tf_trapno;
	code = frame->tf_err;

	userenter(td);

	sticks = (int)td->td_sticks;
	lp->lwp_md.md_regs = frame;
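
	/*
	 * Trap dispatch.  Each case either handles the trap itself and
	 * jumps to 'out' (or 'out2' to skip the userret/userexit
	 * processing), or falls out of the switch with 'i' set to the
	 * signal to deliver and 'ucode' set to the associated signal code.
	 */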
	switch (type) {
	case T_PRIVINFLT:	/* privileged instruction fault */
		ucode = type;
		i = SIGILL;
		break;

	case T_BPTFLT:		/* bpt instruction fault */
	case T_TRCTRAP:		/* trace trap */
		frame->tf_eflags &= ~PSL_T;
		i = SIGTRAP;
		break;

	case T_ARITHTRAP:	/* arithmetic trap */
		ucode = code;
		i = SIGFPE;
		break;

	case T_ASTFLT:		/* Allow process switch */
		mycpu->gd_cnt.v_soft++;
		if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
			atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
					RQF_AST_OWEUPC);
			addupc_task(p, p->p_prof.pr_addr,
				    p->p_prof.pr_ticks);
		}
		goto out;

		/*
		 * The following two traps can happen in
		 * vm86 mode, and, if so, we want to handle
		 * them specially.
		 */
	case T_PROTFLT:		/* general protection fault */
	case T_STKFLT:		/* stack fault */
#if 0
		if (frame->tf_eflags & PSL_VM) {
			i = vm86_emulate((struct vm86frame *)frame);
			if (i == 0)
				goto out;
			break;
		}
#endif
		/* FALL THROUGH */

	case T_SEGNPFLT:	/* segment not present fault */
	case T_TSSFLT:		/* invalid TSS fault */
	case T_DOUBLEFLT:	/* double fault */
	default:
		ucode = code + BUS_SEGM_FAULT;
		i = SIGBUS;
		break;

	case T_PAGEFLT:		/* page fault */
		MAKEMPSAFE(have_mplock);
		i = trap_pfault(frame, TRUE, eva);
		if (i == -1)
			goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if (i == -2)
			goto restart;
#endif
		if (i == 0)
			goto out;

		ucode = T_PAGEFLT;
		break;

	case T_DIVIDE:		/* integer divide fault */
		ucode = FPE_INTDIV;
		i = SIGFPE;
		break;

#if NISA > 0
	case T_NMI:
		MAKEMPSAFE(have_mplock);
		/* machine/parity/power fail/"kitchen sink" faults */
		if (isa_nmi(code) == 0) {
#ifdef DDB
			/*
			 * NMI can be hooked up to a pushbutton
			 * for debugging.
			 */
			if (ddb_on_nmi) {
				kprintf("NMI ... going to debugger\n");
				kdb_trap(type, 0, frame);
			}
#endif /* DDB */
			goto out2;
		} else if (panic_on_nmi)
			panic("NMI indicates hardware failure");
		break;
#endif /* NISA > 0 */

	case T_OFLOW:		/* integer overflow fault */
		ucode = FPE_INTOVF;
		i = SIGFPE;
		break;

	case T_BOUND:		/* bounds check fault */
		ucode = FPE_FLTSUB;
		i = SIGFPE;
		break;

	case T_DNA:
		/*
		 * Virtual kernel intercept - pass the DNA exception
		 * to the (emulated) virtual kernel if it asked to handle
		 * it.  This occurs when the virtual kernel is holding
		 * onto the FP context for a different emulated
		 * process than the one currently running.
		 *
		 * We must still call npxdna() since we may have
		 * saved FP state that the (emulated) virtual kernel
		 * needs to hand over to a different emulated process.
		 */
		if (p->p_vkernel && p->p_vkernel->vk_current &&
		    (td->td_pcb->pcb_flags & FP_VIRTFP)
		) {
			npxdna(frame);
			break;
		}
#if NNPX > 0
		/*
		 * The kernel may have switched out the FP unit's
		 * state, causing the user process to take a fault
		 * when it tries to use the FP unit.  Restore the
		 * state here.
		 */
		if (npxdna(frame))
			goto out;
#endif
		if (!pmath_emulate) {
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;
		}
		i = (*pmath_emulate)(frame);
		if (i == 0) {
			if (!(frame->tf_eflags & PSL_T))
				goto out2;
			frame->tf_eflags &= ~PSL_T;
			i = SIGTRAP;
		}
		/* else ucode = emulator_only_knows() XXX */
		break;

	case T_FPOPFLT:		/* FPU operand fetch fault */
		ucode = T_FPOPFLT;
		i = SIGILL;
		break;

	case T_XMMFLT:		/* SIMD floating-point exception */
		ucode = 0; /* XXX */
		i = SIGFPE;
		break;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (p->p_vkernel && p->p_vkernel->vk_current) {
		vkernel_trap(p, frame);
		goto out;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif
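
	/*
	 * 'out' runs the normal userret()/userexit() processing before
	 * we return to usermode; 'out2' skips it for paths such as fatal
	 * traps and DDB entry where that processing is not appropriate.
	 */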
out:
#ifdef SMP
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount trap/end from %p", (void *)frame->tf_eip));
#endif
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
#ifdef INVARIANTS
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
#endif
}
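
/*
 * Traps taken while the virtual kernel itself is running (as opposed to
 * one of its emulated user processes) are routed here.  Page faults are
 * resolved against the kernel map, selector and onfault faults are
 * recovered where possible, and anything unrecoverable is fatal.
 */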
void
kern_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct proc *p;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_pri & ~TDPRI_MASK;
#endif
	vm_offset_t eva;

	p = td->td_proc;

	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_err;
	else
		eva = 0;

#ifdef DDB
	if (db_active) {
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	++gd->gd_trap_nesting_level;

#ifdef SMP
	if (trap_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
#endif

	--gd->gd_trap_nesting_level;

	type = frame->tf_trapno;
	code = frame->tf_err;

#if 0
kernel_trap:
#endif
	/* kernel trap */

	switch (type) {
	case T_PAGEFLT:			/* page fault */
		MAKEMPSAFE(have_mplock);
		trap_pfault(frame, FALSE, eva);
		goto out2;

	case T_DNA:
#if NNPX > 0
		/*
		 * The kernel may be using npx for copying or other
		 * purposes.
		 */
		panic("kernel NPX should not happen");
		if (npxdna(frame))
			goto out2;
#endif
		break;

	case T_PROTFLT:		/* general protection fault */
	case T_SEGNPFLT:	/* segment not present fault */
		/*
		 * Invalid segment selectors and out of bounds
		 * %eip's and %esp's can be set up in user mode.
		 * This causes a fault in kernel mode when the
		 * kernel tries to return to user mode.  We want
		 * to get this fault so that we can fix the
		 * problem here and not have to check all the
		 * selectors and pointers when the user changes
		 * them.
		 */
		if (mycpu->gd_intr_nesting_level == 0) {
			if (td->td_pcb->pcb_onfault) {
				frame->tf_eip =
				    (register_t)td->td_pcb->pcb_onfault;
				goto out2;
			}
		}
		break;

	case T_TSSFLT:
		/*
		 * PSL_NT can be set in user mode and isn't cleared
		 * automatically when the kernel is entered.  This
		 * causes a TSS fault when the kernel attempts to
		 * `iret' because the TSS link is uninitialized.  We
		 * want to get this fault so that we can fix the
		 * problem here and not every time the kernel is
		 * entered.
		 */
		if (frame->tf_eflags & PSL_NT) {
			frame->tf_eflags &= ~PSL_NT;
			goto out2;
		}
		break;

	case T_TRCTRAP:	 /* trace trap */
#if 0
		if (frame->tf_eip == (int)IDTVEC(syscall)) {
			/*
			 * We've just entered system mode via the
			 * syscall lcall.  Continue single stepping
			 * silently until the syscall handler has
			 * saved the flags.
			 */
			goto out2;
		}
		if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
			/*
			 * The syscall handler has now saved the
			 * flags.  Stop single stepping it.
			 */
			frame->tf_eflags &= ~PSL_T;
			goto out2;
		}
#endif
#if 0
		/*
		 * Ignore debug register trace traps due to
		 * accesses in the user's address space, which
		 * can happen under several conditions such as
		 * if a user sets a watchpoint on a buffer and
		 * then passes that buffer to a system call.
		 * We still want to get TRCTRAPS for addresses
		 * in kernel space because that is useful when
		 * debugging the kernel.
		 */
		if (user_dbreg_trap()) {
			/*
			 * Reset breakpoint bits because the
			 * processor doesn't
			 */
			load_dr6(rdr6() & 0xfffffff0);
			goto out2;
		}
#endif
		/*
		 * Fall through (TRCTRAP kernel mode, kernel address)
		 */
	case T_BPTFLT:
		/*
		 * If DDB is enabled, let it handle the debugger trap.
		 * Otherwise, debugger traps "can't happen".
		 */
#ifdef DDB
		MAKEMPSAFE(have_mplock);
		if (kdb_trap(type, 0, frame))
			goto out2;
#endif
		break;

	case T_NMI:
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);
		goto out2;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out2:	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
#ifdef INVARIANTS
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
#endif
}
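
/*
 * trap_pfault() resolves a page fault against either the kernel map or
 * the current process's vmspace, depending on usermode.  It returns 0 on
 * success, -1 if the fault was fatal (kernel mode with no onfault
 * handler), or the signal (SIGBUS/SIGSEGV) to deliver to the process.
 */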
int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct proc *p = td->td_proc;

	va = trunc_page(eva);
	if (usermode == FALSE) {
		/*
		 * This is a fault on kernel virtual memory.
		 */
		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_xflags & PGEX_W)
		ftype = VM_PROT_READ | VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, usermode, eva);
		return (-1);
	}
	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
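
/*
 * trap_fatal() dumps the trap frame and as much machine state as we can
 * get at to the console and then panics, or drops into the debugger if
 * one is configured and active.
 */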
static void
trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	int code, type, ss, esp;

	code = frame->tf_xflags;
	type = frame->tf_trapno;

	if (type <= MAX_TRAP_MSG)
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			/*frame->tf_eflags & PSL_VM ? "vm86" :*/
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= 0x%x\n", eva);
		kprintf("fault code		= %s %s, %s\n",
			usermode ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%x:0x%x\n",
		frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) /*||(frame->tf_eflags & PSL_VM)*/) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	kprintf("stack pointer		= 0x%x:0x%x\n", ss, esp);
	kprintf("frame pointer		= 0x%x:0x%x\n", ss, frame->tf_ebp);
	kprintf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		kprintf("resume, ");
#if 0
	if (frame->tf_eflags & PSL_VM)
		kprintf("vm86, ");
#endif
	kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread          = pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		kprintf("(CRIT)");
	kprintf("\n");
#ifdef SMP
	/*
	 * XXX FIXME:
	 *	we probably SHOULD have stopped the other CPUs before now!
	 *	another CPU COULD have been touching cpl at this moment...
	 */
	kprintf(" <- SMP: XXX");
#endif
	kprintf("\n");

#ifdef KDB
	if (kdb_trap(&psl))
		return;
#endif
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred. The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	kprintf("\nFatal double fault:\n");
	kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int
trapwrite(unsigned addr)
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAX_USER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	++p->p_lock;

	if (!grow_stack (p, va)) {
		--p->p_lock;
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	--p->p_lock;

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}

/*
 *	syscall2 -	MP aware system call request C handler
 *
 *	A system call is essentially treated as a trap except that the
 *	MP lock is not held on entry or return.  We are responsible for
 *	obtaining the MP lock if necessary and for handling ASTs
 *	(e.g. a task switch) prior to return.
 *
 *	In general, only simple access and manipulation of curproc and
 *	the current stack is allowed without having to hold MP lock.
 *
 *	MPSAFE - note that large sections of this routine are run without
 *		 the MP lock.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_pri & ~TDPRI_MASK;
#endif
#ifdef SMP
	int have_mplock = 0;
#endif
	u_int code;
	union sysunion args;

#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

#ifdef SMP
	KASSERT(td->td_mpcount == 0,
		("badmpcount syscall2 from %p", (void *)frame->tf_eip));
	if (syscall_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
#endif
	userenter(td);		/* lazy raise our priority */

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_eflags = frame->tf_eflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (p->p_vkernel && p->p_vkernel->vk_current) {
		error = vkernel_trap(p, frame);
		frame->tf_eax = error;
		if (error)
			frame->tf_eflags |= PSL_C;
		error = EJUSTRETURN;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	code &= p->p_sysent->sv_mask;
	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];
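
	/*
	 * The syscall arguments are copied in directly after the sysmsg
	 * header embedded in the sysunion, which is why the argument
	 * buffer is addressed as (&args.nosys.sysmsg + 1) throughout.
	 */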
	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
				narg * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);
				ktrsyscall(p, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(p, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;

	/*
	 * The syscall might manipulate the trap frame. If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

#ifdef SMP
	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * we are ktracing.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		MAKEMPSAFE(have_mplock);
#endif

	error = (*callp->sy_call)(&args);

#if 0
	kprintf("system call %d returned %d\n", code, error);
#endif

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_eax = args.sysmsg_fds[0];
		frame->tf_edx = args.sysmsg_fds[1];
		frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame->tf_eip -= frame->tf_err;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_eax = error;
		frame->tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) /*&& !(orig_tf_eflags & PSL_VM)*/) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(p, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p", (void *)frame->tf_eip));
	if (have_mplock)
		rel_mplock();
#endif
#ifdef INVARIANTS
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
#endif
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
void
fork_return(struct lwp *lp, struct trapframe frame)
{
	struct proc *p = lp->lwp_proc;

	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */
	frame.tf_edx = 1;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread);
	userret(lp, &frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(p, SYS_fork, 0, 0);
#endif
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
#ifdef SMP
	KKASSERT(lp->lwp_thread->td_mpcount == 1);
	rel_mplock();
#endif
}

/*
 * doreti has turned into this.  The frame is directly on the stack.  We
 * pull everything else we need (fpu and tls context) from the current
 * thread.
 *
 * Note on fpu interactions: In a virtual kernel, the fpu context for
 * an emulated user mode process is not shared with the virtual kernel's
 * fpu context, so we only have to 'stack' fpu contexts within the virtual
 * kernel itself, and not even then since the signal() contexts that we care
 * about save and restore the FPU state (I think anyhow).
 *
 * vmspace_ctl() returns an error only if it had problems installing the
 * context we supplied or problems copying data to/from our VM space.
 */
void
go_user(struct trapframe frame)
{
	int r;

	/*
	 * Interrupts may be disabled on entry, make sure all signals
	 * can be received before beginning our loop.
	 */
	sigsetmask(0);

	/*
	 * Switch to the current simulated user process, then call
	 * user_trap() when we break out of it (usually due to a signal).
	 */
	for (;;) {
#if 0
		kprintf("GO USER VMSPC %p pid %-4d %s (blocked %08x)\n",
			&curproc->p_vmspace->vm_pmap,
			curproc->p_pid, curproc->p_comm, sigblock(0));
#endif
		/*
		 * Tell the real kernel whether it is ok to use the FP
		 * unit or not.
		 */
		if (mdcpu->gd_npxthread == curthread) {
			frame.tf_xflags &= ~FPEX_FAULT;
		} else {
			frame.tf_xflags |= FPEX_FAULT;
		}

		/*
		 * Run emulated user process context
		 */
		r = vmspace_ctl(&curproc->p_vmspace->vm_pmap, VMSPACE_CTL_RUN,
				&frame, &curthread->td_savevext);
		if (r < 0)
			panic("vmspace_ctl had problems with the context");
		if (frame.tf_trapno) {
#if 0
			kprintf("User trapno %d eip %08x err %08x xflags %d\n",
				frame.tf_trapno, frame.tf_eip,
				frame.tf_err, frame.tf_xflags);
#endif
			user_trap(&frame);
		} else if (mycpu->gd_reqflags & RQF_AST_MASK) {
#if 0
			kprintf("reqflags %08x %08x %d\n", mycpu->gd_reqflags,
				sigblock(0), curthread->td_pri);
#endif
			frame.tf_trapno = T_ASTFLT;
			user_trap(&frame);
		} else {
#if 0
			kprintf("Kernel AST %08x\n", sigblock(0));
#endif
		}
	}
}

/*
 * If FPEX_FAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	panic("set_vkernel_fp: vkernel-within-vkernel not yet supported");
	if (frame->tf_xflags & FPEX_FAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}