kernel - Move grow_stack code in fault path to improve fault performance
[dragonfly.git] / sys / platform / vkernel / i386 / trap.c
blob 49fbd831f2e422195fb0dcc04f62f30d6a4962dd
1 /*-
2 * Copyright (C) 1994, David Greenman
3 * Copyright (c) 1990, 1993
4 * The Regents of the University of California. All rights reserved.
6 * This code is derived from software contributed to Berkeley by
7 * the University of Utah, and William Jolitz.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
37 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
38 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
39 * $DragonFly: src/sys/platform/vkernel/i386/trap.c,v 1.35 2008/09/09 04:06:19 dillon Exp $
43 * 386 Trap and System call handling
46 #include "use_isa.h"
47 #include "use_npx.h"
49 #include "opt_ddb.h"
50 #include "opt_ktrace.h"
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/proc.h>
55 #include <sys/pioctl.h>
56 #include <sys/kernel.h>
57 #include <sys/resourcevar.h>
58 #include <sys/signalvar.h>
59 #include <sys/signal2.h>
60 #include <sys/syscall.h>
61 #include <sys/sysctl.h>
62 #include <sys/sysent.h>
63 #include <sys/uio.h>
64 #include <sys/vmmeter.h>
65 #include <sys/malloc.h>
66 #ifdef KTRACE
67 #include <sys/ktrace.h>
68 #endif
69 #include <sys/ktr.h>
70 #include <sys/upcall.h>
71 #include <sys/vkernel.h>
72 #include <sys/sysproto.h>
73 #include <sys/sysunion.h>
74 #include <sys/vmspace.h>
76 #include <vm/vm.h>
77 #include <vm/vm_param.h>
78 #include <sys/lock.h>
79 #include <vm/pmap.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_map.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_extern.h>
85 #include <machine/cpu.h>
86 #include <machine/md_var.h>
87 #include <machine/pcb.h>
88 #include <machine/smp.h>
89 #include <machine/tss.h>
90 #include <machine/globaldata.h>
92 #include <machine/vm86.h>
94 #include <ddb/ddb.h>
96 #include <sys/msgport2.h>
97 #include <sys/thread2.h>
98 #include <sys/mplock2.h>
100 #ifdef SMP
102 #define MAKEMPSAFE(have_mplock) \
103 if (have_mplock == 0) { \
104 get_mplock(); \
105 have_mplock = 1; \
106 }
108 #else
110 #define MAKEMPSAFE(have_mplock)
112 #endif
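/*
 * Editor's illustrative sketch (not part of the original file): the
 * MAKEMPSAFE() pattern the handlers below follow.  A handler tracks in a
 * local flag whether it has already taken the Big Giant Lock, so the lock
 * is acquired at most once per trap and released exactly once on the way
 * out.  The handler name here is hypothetical.
 */
#if 0
void
example_handler(struct trapframe *frame)
{
	int have_mplock = 0;

	MAKEMPSAFE(have_mplock);	/* grab the BGL, at most once */
	/* ... code that still requires the BGL ... */
	if (have_mplock)
		rel_mplock();		/* drop it only if we took it */
}
#endif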
114 int (*pmath_emulate) (struct trapframe *);
116 static int trap_pfault (struct trapframe *, int, vm_offset_t);
117 static void trap_fatal (struct trapframe *, int, vm_offset_t);
118 void dblfault_handler (void);
120 #if 0
121 extern inthand_t IDTVEC(syscall);
122 #endif
124 #define MAX_TRAP_MSG 28
125 static char *trap_msg[] = {
126 "", /* 0 unused */
127 "privileged instruction fault", /* 1 T_PRIVINFLT */
128 "", /* 2 unused */
129 "breakpoint instruction fault", /* 3 T_BPTFLT */
130 "", /* 4 unused */
131 "", /* 5 unused */
132 "arithmetic trap", /* 6 T_ARITHTRAP */
133 "system forced exception", /* 7 T_ASTFLT */
134 "", /* 8 unused */
135 "general protection fault", /* 9 T_PROTFLT */
136 "trace trap", /* 10 T_TRCTRAP */
137 "", /* 11 unused */
138 "page fault", /* 12 T_PAGEFLT */
139 "", /* 13 unused */
140 "alignment fault", /* 14 T_ALIGNFLT */
141 "", /* 15 unused */
142 "", /* 16 unused */
143 "", /* 17 unused */
144 "integer divide fault", /* 18 T_DIVIDE */
145 "non-maskable interrupt trap", /* 19 T_NMI */
146 "overflow trap", /* 20 T_OFLOW */
147 "FPU bounds check fault", /* 21 T_BOUND */
148 "FPU device not available", /* 22 T_DNA */
149 "double fault", /* 23 T_DOUBLEFLT */
150 "FPU operand fetch fault", /* 24 T_FPOPFLT */
151 "invalid TSS fault", /* 25 T_TSSFLT */
152 "segment not present fault", /* 26 T_SEGNPFLT */
153 "stack fault", /* 27 T_STKFLT */
154 "machine check trap", /* 28 T_MCHK */
157 #ifdef DDB
158 static int ddb_on_nmi = 1;
159 SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
160 &ddb_on_nmi, 0, "Go to DDB on NMI");
161 #endif
162 static int panic_on_nmi = 1;
163 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
164 &panic_on_nmi, 0, "Panic on NMI");
165 static int fast_release;
166 SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
167 &fast_release, 0, "Passive Release was optimal");
168 static int slow_release;
169 SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
170 &slow_release, 0, "Passive Release was nonoptimal");
171 #ifdef SMP
172 static int syscall_mpsafe = 1;
173 SYSCTL_INT(_kern, OID_AUTO, syscall_mpsafe, CTLFLAG_RW,
174 &syscall_mpsafe, 0, "Allow MPSAFE marked syscalls to run without BGL");
175 TUNABLE_INT("kern.syscall_mpsafe", &syscall_mpsafe);
176 static int trap_mpsafe = 1;
177 SYSCTL_INT(_kern, OID_AUTO, trap_mpsafe, CTLFLAG_RW,
178 &trap_mpsafe, 0, "Allow traps to mostly run without the BGL");
179 TUNABLE_INT("kern.trap_mpsafe", &trap_mpsafe);
180 #endif
182 MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
183 extern int max_sysmsg;
186 * Passively intercepts the thread switch function to increase
187 * the thread priority from a user priority to a kernel priority, reducing
188 * syscall and trap overhead for the case where no switch occurs.
190 * Synchronizes td_ucred with p_ucred. This is used by system calls,
191 * signal handling, faults, AST traps, and anything else that enters the
192 * kernel from userland and provides the kernel with a stable read-only
193 * copy of the process ucred.
195 static __inline void
196 userenter(struct thread *curtd, struct proc *curp)
198 struct ucred *ocred;
199 struct ucred *ncred;
201 curtd->td_release = lwkt_passive_release;
203 if (curtd->td_ucred != curp->p_ucred) {
204 ncred = crhold(curp->p_ucred);
205 ocred = curtd->td_ucred;
206 curtd->td_ucred = ncred;
207 if (ocred)
208 crfree(ocred);
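/*
 * Editor's conceptual sketch (not part of the original file): roughly what
 * the passive release hook installed above does if lwkt_switch() ends up
 * invoking td_release before we return to userland.  The real routine
 * lives in the LWKT/userland scheduler code; the helper name and exact
 * calls here are illustrative only.
 */
#if 0
static void
example_passive_release(struct thread *td)
{
	struct lwp *lp = td->td_lwp;

	td->td_release = NULL;				/* one-shot hook */
	lwkt_setpri_self(TDPRI_KERN_USER);		/* raise to kernel priority */
	lp->lwp_proc->p_usched->release_curproc(lp);	/* give up curproc designation */
}
#endif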
213 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
214 * must be completed before we can return to or try to return to userland.
216 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
217 * bit arithmetic on the delta calculation so the absolute tick values are
218 * truncated to an integer.
220 static void
221 userret(struct lwp *lp, struct trapframe *frame, int sticks)
223 struct proc *p = lp->lwp_proc;
224 int sig;
227 * Charge system time if profiling. Note: times are in microseconds.
228 * This may do a copyout and block, so do it first even though it
229 * means some system time will be charged as user time.
231 if (p->p_flag & P_PROFIL) {
232 addupc_task(p, frame->tf_eip,
233 (u_int)((int)lp->lwp_thread->td_sticks - sticks));
236 recheck:
238 * If the jungle wants us dead, so be it.
240 if (lp->lwp_flag & LWP_WEXIT) {
241 get_mplock();
242 lwp_exit(0);
243 rel_mplock(); /* NOT REACHED */
247 * Block here if we are in a stopped state.
249 if (p->p_stat == SSTOP) {
250 get_mplock();
251 tstop();
252 rel_mplock();
253 goto recheck;
257 * Post any pending upcalls
259 if (p->p_flag & P_UPCALLPEND) {
260 get_mplock();
261 p->p_flag &= ~P_UPCALLPEND;
262 postupcall(lp);
263 rel_mplock();
264 goto recheck;
268 * Post any pending signals
270 if ((sig = CURSIG_TRACE(lp)) != 0) {
271 get_mplock();
272 postsig(sig);
273 rel_mplock();
274 goto recheck;
278 * block here if we are swapped out, but still process signals
279 * (such as SIGKILL). proc0 (the swapin scheduler) is already
280 * aware of our situation; we do not have to wake it up.
282 if (p->p_flag & P_SWAPPEDOUT) {
283 get_mplock();
284 p->p_flag |= P_SWAPWAIT;
285 swapin_request();
286 if (p->p_flag & P_SWAPWAIT)
287 tsleep(p, PCATCH, "SWOUT", 0);
288 p->p_flag &= ~P_SWAPWAIT;
289 rel_mplock();
290 goto recheck;
294 * Make sure postsig() handled request to restore old signal mask after
295 * running signal handler.
297 KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
301 * Cleanup from userenter and any passive release that might have occurred.
302 * We must reclaim the current-process designation before we can return
303 * to usermode. We also handle both LWKT and USER reschedule requests.
305 static __inline void
306 userexit(struct lwp *lp)
308 struct thread *td = lp->lwp_thread;
309 /* globaldata_t gd = td->td_gd; */
312 * Handle stop requests at kernel priority. Any requests queued
313 * after this loop will generate another AST.
315 while (lp->lwp_proc->p_stat == SSTOP) {
316 get_mplock();
317 tstop();
318 rel_mplock();
322 * Reduce our priority in preparation for a return to userland. If
323 * our passive release function was still in place, our priority was
324 * never raised and does not need to be reduced.
326 lwkt_passive_recover(td);
329 * Become the current user scheduled process if we aren't already,
330 * and deal with reschedule requests and other factors.
332 lp->lwp_proc->p_usched->acquire_curproc(lp);
333 /* WARNING: we may have migrated cpu's */
334 /* gd = td->td_gd; */
337 #if !defined(KTR_KERNENTRY)
338 #define KTR_KERNENTRY KTR_ALL
339 #endif
340 KTR_INFO_MASTER(kernentry);
341 KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "pid=%d, tid=%d, trapno=%d, eva=%p",
342 sizeof(int) + sizeof(int) + sizeof(int) + sizeof(vm_offset_t));
343 KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "pid=%d, tid=%d",
344 sizeof(int) + sizeof(int));
345 KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "pid=%d, tid=%d, call=%d",
346 sizeof(int) + sizeof(int) + sizeof(int));
347 KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "pid=%d, tid=%d, err=%d",
348 sizeof(int) + sizeof(int) + sizeof(int));
349 KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "pid=%d, tid=%d",
350 sizeof(int) + sizeof(int));
353 * Exception, fault, and trap interface to the kernel.
354 * This common code is called from assembly language IDT gate entry
355 * routines that prepare a suitable stack frame, and restore this
356 * frame after the exception has been processed.
358 * This function is also called from doreti in an interlock to handle ASTs.
359 * For example: hardwareint->INTROUTINE->(set ast)->doreti->trap
361 * NOTE! We have to retrieve the fault address prior to obtaining the
362 * MP lock because get_mplock() may switch out. YYY cr2 really ought
363 * to be retrieved by the assembly code, not here.
365 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
366 * if an attempt is made to switch from a fast interrupt or IPI. This is
367 * necessary to properly take fatal kernel traps on SMP machines if
368 * get_mplock() has to block.
371 void
372 user_trap(struct trapframe *frame)
374 struct globaldata *gd = mycpu;
375 struct thread *td = gd->gd_curthread;
376 struct lwp *lp = td->td_lwp;
377 struct proc *p;
378 int sticks = 0;
379 int i = 0, ucode = 0, type, code;
380 #ifdef SMP
381 int have_mplock = 0;
382 #endif
383 #ifdef INVARIANTS
384 int crit_count = td->td_pri & ~TDPRI_MASK;
385 #endif
386 vm_offset_t eva;
388 p = td->td_proc;
391 * This is a bad kludge to avoid changing the various trapframe
392 * structures. Because we are enabled as a virtual kernel,
393 * the original tf_err field will be passed to us shifted 16
394 * over in the tf_trapno field for T_PAGEFLT.
396 if (frame->tf_trapno == T_PAGEFLT)
397 eva = frame->tf_err;
398 else
399 eva = 0;
400 #if 0
401 kprintf("USER_TRAP AT %08x xflags %d trapno %d eva %08x\n",
402 frame->tf_eip, frame->tf_xflags, frame->tf_trapno, eva);
403 #endif
406 * Everything coming from user mode runs through user_trap,
407 * including system calls.
409 if (frame->tf_trapno == T_SYSCALL80) {
410 syscall2(frame);
411 return;
414 KTR_LOG(kernentry_trap, lp->lwp_proc->p_pid, lp->lwp_tid,
415 frame->tf_trapno, eva);
417 #ifdef DDB
418 if (db_active) {
419 eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
420 ++gd->gd_trap_nesting_level;
421 MAKEMPSAFE(have_mplock);
422 trap_fatal(frame, TRUE, eva);
423 --gd->gd_trap_nesting_level;
424 goto out2;
426 #endif
428 ++gd->gd_trap_nesting_level;
429 #ifdef SMP
430 if (trap_mpsafe == 0)
431 MAKEMPSAFE(have_mplock);
432 #endif
434 --gd->gd_trap_nesting_level;
436 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
437 restart:
438 #endif
439 type = frame->tf_trapno;
440 code = frame->tf_err;
442 userenter(td, p);
444 sticks = (int)td->td_sticks;
445 lp->lwp_md.md_regs = frame;
447 switch (type) {
448 case T_PRIVINFLT: /* privileged instruction fault */
449 ucode = ILL_PRVOPC;
450 i = SIGILL;
451 break;
453 case T_BPTFLT: /* bpt instruction fault */
454 case T_TRCTRAP: /* trace trap */
455 frame->tf_eflags &= ~PSL_T;
456 ucode = TRAP_TRACE;
457 i = SIGTRAP;
458 break;
460 case T_ARITHTRAP: /* arithmetic trap */
461 ucode = code;
462 i = SIGFPE;
463 break;
465 case T_ASTFLT: /* Allow process switch */
466 mycpu->gd_cnt.v_soft++;
467 if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
468 atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
469 RQF_AST_OWEUPC);
470 addupc_task(p, p->p_prof.pr_addr,
471 p->p_prof.pr_ticks);
473 goto out;
476 * The following two traps can happen in
477 * vm86 mode, and, if so, we want to handle
478 * them specially.
480 case T_PROTFLT: /* general protection fault */
481 case T_STKFLT: /* stack fault */
482 #if 0
483 if (frame->tf_eflags & PSL_VM) {
484 i = vm86_emulate((struct vm86frame *)frame);
485 if (i == 0)
486 goto out;
487 break;
489 #endif
490 i = SIGBUS;
491 ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
492 break;
493 case T_SEGNPFLT: /* segment not present fault */
494 i = SIGBUS;
495 ucode = BUS_ADRERR;
496 break;
497 case T_TSSFLT: /* invalid TSS fault */
498 case T_DOUBLEFLT: /* double fault */
499 i = SIGBUS;
500 ucode = BUS_OBJERR;
501 default:
502 #if 0
503 ucode = code + BUS_SEGM_FAULT ; /* XXX: ???*/
504 #endif
505 ucode = BUS_OBJERR;
506 i = SIGBUS;
507 break;
509 case T_PAGEFLT: /* page fault */
510 MAKEMPSAFE(have_mplock);
511 i = trap_pfault(frame, TRUE, eva);
512 if (i == -1)
513 goto out;
514 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
515 if (i == -2)
516 goto restart;
517 #endif
518 if (i == 0)
519 goto out;
521 #if 0
522 ucode = T_PAGEFLT;
523 #endif
524 if (i == SIGSEGV)
525 ucode = SEGV_MAPERR;
526 else
527 ucode = BUS_ADRERR;
528 break;
530 case T_DIVIDE: /* integer divide fault */
531 ucode = FPE_INTDIV;
532 i = SIGFPE;
533 break;
535 #if NISA > 0
536 case T_NMI:
537 MAKEMPSAFE(have_mplock);
538 /* machine/parity/power fail/"kitchen sink" faults */
539 if (isa_nmi(code) == 0) {
540 #ifdef DDB
542 * NMI can be hooked up to a pushbutton
543 * for debugging.
545 if (ddb_on_nmi) {
546 kprintf ("NMI ... going to debugger\n");
547 kdb_trap (type, 0, frame);
549 #endif /* DDB */
550 goto out2;
551 } else if (panic_on_nmi)
552 panic("NMI indicates hardware failure");
553 break;
554 #endif /* NISA > 0 */
556 case T_OFLOW: /* integer overflow fault */
557 ucode = FPE_INTOVF;
558 i = SIGFPE;
559 break;
561 case T_BOUND: /* bounds check fault */
562 ucode = FPE_FLTSUB;
563 i = SIGFPE;
564 break;
566 case T_DNA:
568 * Virtual kernel intercept - pass the DNA exception
569 * to the (emulated) virtual kernel if it asked to handle
570 * it. This occurs when the virtual kernel is holding
571 * onto the FP context for a different emulated
572 * process than the one currently running.
574 * We must still call npxdna() since we may have
575 * saved FP state that the (emulated) virtual kernel
576 * needs to hand over to a different emulated process.
578 if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
579 (td->td_pcb->pcb_flags & FP_VIRTFP)
580 ) {
581 npxdna(frame);
582 break;
583 }
584 #if NNPX > 0
586 * The kernel may have switched out the FP unit's
587 * state, causing the user process to take a fault
588 * when it tries to use the FP unit. Restore the
589 * state here
591 if (npxdna(frame))
592 goto out;
593 #endif
594 if (!pmath_emulate) {
595 i = SIGFPE;
596 ucode = FPE_FPU_NP_TRAP;
597 break;
599 i = (*pmath_emulate)(frame);
600 if (i == 0) {
601 if (!(frame->tf_eflags & PSL_T))
602 goto out2;
603 frame->tf_eflags &= ~PSL_T;
604 i = SIGTRAP;
606 /* else ucode = emulator_only_knows() XXX */
607 break;
609 case T_FPOPFLT: /* FPU operand fetch fault */
610 ucode = ILL_COPROC;
611 i = SIGILL;
612 break;
614 case T_XMMFLT: /* SIMD floating-point exception */
615 ucode = 0; /* XXX */
616 i = SIGFPE;
617 break;
621 * Virtual kernel intercept - if the fault is directly related to a
622 * VM context managed by a virtual kernel then let the virtual kernel
623 * handle it.
625 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
626 vkernel_trap(lp, frame);
627 goto out;
631 * Translate fault for emulators (e.g. Linux)
633 if (*p->p_sysent->sv_transtrap)
634 i = (*p->p_sysent->sv_transtrap)(i, type);
636 MAKEMPSAFE(have_mplock);
637 trapsignal(lp, i, ucode);
639 #ifdef DEBUG
640 if (type <= MAX_TRAP_MSG) {
641 uprintf("fatal process exception: %s",
642 trap_msg[type]);
643 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
644 uprintf(", fault VA = 0x%lx", (u_long)eva);
645 uprintf("\n");
647 #endif
649 out:
650 #ifdef SMP
651 KASSERT(td->td_mpcount == have_mplock, ("badmpcount trap/end from %p", (void *)frame->tf_eip));
652 #endif
653 userret(lp, frame, sticks);
654 userexit(lp);
655 out2: ;
656 #ifdef SMP
657 if (have_mplock)
658 rel_mplock();
659 #endif
660 KTR_LOG(kernentry_trap_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
661 #ifdef INVARIANTS
662 KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
663 ("syscall: critical section count mismatch! %d/%d",
664 crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
665 #endif
668 void
669 kern_trap(struct trapframe *frame)
671 struct globaldata *gd = mycpu;
672 struct thread *td = gd->gd_curthread;
673 struct lwp *lp;
674 struct proc *p;
675 int i = 0, ucode = 0, type, code;
676 #ifdef SMP
677 int have_mplock = 0;
678 #endif
679 #ifdef INVARIANTS
680 int crit_count = td->td_pri & ~TDPRI_MASK;
681 #endif
682 vm_offset_t eva;
684 lp = td->td_lwp;
685 p = td->td_proc;
687 if (frame->tf_trapno == T_PAGEFLT)
688 eva = frame->tf_err;
689 else
690 eva = 0;
692 #ifdef DDB
693 if (db_active) {
694 ++gd->gd_trap_nesting_level;
695 MAKEMPSAFE(have_mplock);
696 trap_fatal(frame, FALSE, eva);
697 --gd->gd_trap_nesting_level;
698 goto out2;
700 #endif
702 ++gd->gd_trap_nesting_level;
704 #ifdef SMP
705 if (trap_mpsafe == 0)
706 MAKEMPSAFE(have_mplock);
707 #endif
709 --gd->gd_trap_nesting_level;
711 type = frame->tf_trapno;
712 code = frame->tf_err;
714 #if 0
715 kernel_trap:
716 #endif
717 /* kernel trap */
719 switch (type) {
720 case T_PAGEFLT: /* page fault */
721 MAKEMPSAFE(have_mplock);
722 trap_pfault(frame, FALSE, eva);
723 goto out2;
725 case T_DNA:
726 #if NNPX > 0
728 * The kernel may be using npx for copying or other
729 * purposes.
731 panic("kernel NPX should not happen");
732 if (npxdna(frame))
733 goto out2;
734 #endif
735 break;
737 case T_PROTFLT: /* general protection fault */
738 case T_SEGNPFLT: /* segment not present fault */
740 * Invalid segment selectors and out of bounds
741 * %eip's and %esp's can be set up in user mode.
742 * This causes a fault in kernel mode when the
743 * kernel tries to return to user mode. We want
744 * to get this fault so that we can fix the
745 * problem here and not have to check all the
746 * selectors and pointers when the user changes
747 * them.
749 if (mycpu->gd_intr_nesting_level == 0) {
750 if (td->td_pcb->pcb_onfault) {
751 frame->tf_eip =
752 (register_t)td->td_pcb->pcb_onfault;
753 goto out2;
756 break;
758 case T_TSSFLT:
760 * PSL_NT can be set in user mode and isn't cleared
761 * automatically when the kernel is entered. This
762 * causes a TSS fault when the kernel attempts to
763 * `iret' because the TSS link is uninitialized. We
764 * want to get this fault so that we can fix the
765 * problem here and not every time the kernel is
766 * entered.
768 if (frame->tf_eflags & PSL_NT) {
769 frame->tf_eflags &= ~PSL_NT;
770 goto out2;
772 break;
774 case T_TRCTRAP: /* trace trap */
775 #if 0
776 if (frame->tf_eip == (int)IDTVEC(syscall)) {
778 * We've just entered system mode via the
779 * syscall lcall. Continue single stepping
780 * silently until the syscall handler has
781 * saved the flags.
783 goto out2;
785 if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
787 * The syscall handler has now saved the
788 * flags. Stop single stepping it.
790 frame->tf_eflags &= ~PSL_T;
791 goto out2;
793 #endif
794 #if 0
796 * Ignore debug register trace traps due to
797 * accesses in the user's address space, which
798 * can happen under several conditions such as
799 * if a user sets a watchpoint on a buffer and
800 * then passes that buffer to a system call.
801 * We still want to get TRCTRAPS for addresses
802 * in kernel space because that is useful when
803 * debugging the kernel.
805 if (user_dbreg_trap()) {
807 * Reset breakpoint bits because the
808 * processor doesn't
810 load_dr6(rdr6() & 0xfffffff0);
811 goto out2;
813 #endif
815 * Fall through (TRCTRAP kernel mode, kernel address)
817 case T_BPTFLT:
819 * If DDB is enabled, let it handle the debugger trap.
820 * Otherwise, debugger traps "can't happen".
822 #ifdef DDB
823 MAKEMPSAFE(have_mplock);
824 if (kdb_trap (type, 0, frame))
825 goto out2;
826 #endif
827 break;
828 case T_DIVIDE:
829 MAKEMPSAFE(have_mplock);
830 trap_fatal(frame, FALSE, eva);
831 goto out2;
832 case T_NMI:
833 MAKEMPSAFE(have_mplock);
834 trap_fatal(frame, FALSE, eva);
835 goto out2;
836 case T_SYSCALL80:
838 * Ignore this trap generated from a spurious SIGTRAP.
840 * single stepping into syscalls leads to spurious SIGTRAPs,
841 * so ignore them
843 * Haiku (c) 2007 Simon 'corecode' Schubert
845 goto out2;
849 * Translate fault for emulators (e.g. Linux)
851 if (*p->p_sysent->sv_transtrap)
852 i = (*p->p_sysent->sv_transtrap)(i, type);
854 MAKEMPSAFE(have_mplock);
855 trapsignal(lp, i, ucode);
857 #ifdef DEBUG
858 if (type <= MAX_TRAP_MSG) {
859 uprintf("fatal process exception: %s",
860 trap_msg[type]);
861 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
862 uprintf(", fault VA = 0x%lx", (u_long)eva);
863 uprintf("\n");
865 #endif
867 out2:
869 #ifdef SMP
870 if (have_mplock)
871 rel_mplock();
872 #endif
873 #ifdef INVARIANTS
874 KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
875 ("syscall: critical section count mismatch! %d/%d",
876 crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
877 #endif
880 static int
881 trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
883 vm_offset_t va;
884 struct vmspace *vm = NULL;
885 vm_map_t map = 0;
886 int rv = 0;
887 int fault_flags;
888 vm_prot_t ftype;
889 thread_t td = curthread;
890 struct lwp *lp = td->td_lwp;
892 va = trunc_page(eva);
893 if (usermode == FALSE) {
895 * This is a fault on kernel virtual memory.
897 map = &kernel_map;
898 } else {
900 * This is a fault on non-kernel virtual memory.
901 * vm is initialized above to NULL. If curproc is NULL
902 * or curproc->p_vmspace is NULL the fault is fatal.
904 if (lp != NULL)
905 vm = lp->lwp_vmspace;
907 if (vm == NULL)
908 goto nogo;
910 map = &vm->vm_map;
913 if (frame->tf_xflags & PGEX_W)
914 ftype = VM_PROT_READ | VM_PROT_WRITE;
915 else
916 ftype = VM_PROT_READ;
918 if (map != &kernel_map) {
920 * Keep swapout from messing with us during this
921 * critical time.
923 PHOLD(lp->lwp_proc);
926 * Issue fault
928 fault_flags = 0;
929 if (usermode)
930 fault_flags |= VM_FAULT_BURST;
931 if (ftype & VM_PROT_WRITE)
932 fault_flags |= VM_FAULT_DIRTY;
933 else
934 fault_flags |= VM_FAULT_NORMAL;
935 rv = vm_fault(map, va, ftype, fault_flags);
937 PRELE(lp->lwp_proc);
938 } else {
940 * Don't have to worry about process locking or stacks in the kernel.
942 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
945 if (rv == KERN_SUCCESS)
946 return (0);
947 nogo:
948 if (!usermode) {
949 if (td->td_gd->gd_intr_nesting_level == 0 &&
950 td->td_pcb->pcb_onfault) {
951 frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
952 return (0);
954 trap_fatal(frame, usermode, eva);
955 return (-1);
957 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
960 static void
961 trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
963 int code, type, ss, esp;
965 code = frame->tf_xflags;
966 type = frame->tf_trapno;
968 if (type <= MAX_TRAP_MSG) {
969 kprintf("\n\nFatal trap %d: %s while in %s mode\n",
970 type, trap_msg[type],
971 (usermode ? "user" : "kernel"));
973 #ifdef SMP
974 /* two separate prints in case of a trap on an unmapped page */
975 kprintf("mp_lock = %08x; ", mp_lock);
976 kprintf("cpuid = %d\n", mycpu->gd_cpuid);
977 #endif
978 if (type == T_PAGEFLT) {
979 kprintf("fault virtual address = %p\n", (void *)eva);
980 kprintf("fault code = %s %s, %s\n",
981 usermode ? "user" : "supervisor",
982 code & PGEX_W ? "write" : "read",
983 code & PGEX_P ? "protection violation" : "page not present");
985 kprintf("instruction pointer = 0x%x:0x%x\n",
986 frame->tf_cs & 0xffff, frame->tf_eip);
987 if (usermode) {
988 ss = frame->tf_ss & 0xffff;
989 esp = frame->tf_esp;
990 } else {
991 ss = GSEL(GDATA_SEL, SEL_KPL);
992 esp = (int)&frame->tf_esp;
994 kprintf("stack pointer = 0x%x:0x%x\n", ss, esp);
995 kprintf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
996 kprintf("processor eflags = ");
997 if (frame->tf_eflags & PSL_T)
998 kprintf("trace trap, ");
999 if (frame->tf_eflags & PSL_I)
1000 kprintf("interrupt enabled, ");
1001 if (frame->tf_eflags & PSL_NT)
1002 kprintf("nested task, ");
1003 if (frame->tf_eflags & PSL_RF)
1004 kprintf("resume, ");
1005 #if 0
1006 if (frame->tf_eflags & PSL_VM)
1007 kprintf("vm86, ");
1008 #endif
1009 kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
1010 kprintf("current process = ");
1011 if (curproc) {
1012 kprintf("%lu (%s)\n",
1013 (u_long)curproc->p_pid, curproc->p_comm ?
1014 curproc->p_comm : "");
1015 } else {
1016 kprintf("Idle\n");
1018 kprintf("current thread = pri %d ", curthread->td_pri);
1019 if (curthread->td_pri >= TDPRI_CRIT)
1020 kprintf("(CRIT)");
1021 kprintf("\n");
1022 #ifdef SMP
1024 * XXX FIXME:
1025 * we probably SHOULD have stopped the other CPUs before now!
1026 * another CPU COULD have been touching cpl at this moment...
1028 kprintf(" <- SMP: XXX");
1029 #endif
1030 kprintf("\n");
1032 #ifdef KDB
1033 if (kdb_trap(&psl))
1034 return;
1035 #endif
1036 #ifdef DDB
1037 if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
1038 return;
1039 #endif
1040 kprintf("trap number = %d\n", type);
1041 if (type <= MAX_TRAP_MSG)
1042 panic("%s", trap_msg[type]);
1043 else
1044 panic("unknown/reserved trap");
1048 * Double fault handler. Called when a fault occurs while writing
1049 * a frame for a trap/exception onto the stack. This usually occurs
1050 * when the stack overflows (such is the case with infinite recursion,
1051 * for example).
1053 * XXX Note that the current PTD gets replaced by IdlePTD when the
1054 * task switch occurs. This means that the stack that was active at
1055 * the time of the double fault is not available at <kstack> unless
1056 * the machine was idle when the double fault occurred. The downside
1057 * of this is that "trace <ebp>" in ddb won't work.
1059 void
1060 dblfault_handler(void)
1062 struct mdglobaldata *gd = mdcpu;
1064 kprintf("\nFatal double fault:\n");
1065 kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
1066 kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
1067 kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
1068 #ifdef SMP
1069 /* two separate prints in case of a trap on an unmapped page */
1070 kprintf("mp_lock = %08x; ", mp_lock);
1071 kprintf("cpuid = %d\n", mycpu->gd_cpuid);
1072 #endif
1073 panic("double fault");
1077 * syscall2 - MP aware system call request C handler
1079 * A system call is essentially treated as a trap except that the
1080 * MP lock is not held on entry or return. We are responsible for
1081 * obtaining the MP lock if necessary and for handling ASTs
1082 * (e.g. a task switch) prior to return.
1084 * MPSAFE
1086 void
1087 syscall2(struct trapframe *frame)
1089 struct thread *td = curthread;
1090 struct proc *p = td->td_proc;
1091 struct lwp *lp = td->td_lwp;
1092 caddr_t params;
1093 struct sysent *callp;
1094 register_t orig_tf_eflags;
1095 int sticks;
1096 int error;
1097 int narg;
1098 #ifdef INVARIANTS
1099 int crit_count = td->td_pri & ~TDPRI_MASK;
1100 #endif
1101 #ifdef SMP
1102 int have_mplock = 0;
1103 #endif
1104 u_int code;
1105 union sysunion args;
1107 KTR_LOG(kernentry_syscall, lp->lwp_proc->p_pid, lp->lwp_tid,
1108 frame->tf_eax);
1110 #ifdef SMP
1111 KASSERT(td->td_mpcount == 0, ("badmpcount syscall2 from %p", (void *)frame->tf_eip));
1112 if (syscall_mpsafe == 0)
1113 MAKEMPSAFE(have_mplock);
1114 #endif
1115 userenter(td, p); /* lazy raise our priority */
1118 * Misc
1120 sticks = (int)td->td_sticks;
1121 orig_tf_eflags = frame->tf_eflags;
1124 * Virtual kernel intercept - if a VM context managed by a virtual
1125 * kernel issues a system call the virtual kernel handles it, not us.
1126 * Restore the virtual kernel context and return from its system
1127 * call. The current frame is copied out to the virtual kernel.
1129 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
1130 vkernel_trap(lp, frame);
1131 error = EJUSTRETURN;
1132 goto out;
1136 * Get the system call parameters and account for time
1138 lp->lwp_md.md_regs = frame;
1139 params = (caddr_t)frame->tf_esp + sizeof(int);
1140 code = frame->tf_eax;
1142 if (p->p_sysent->sv_prepsyscall) {
1143 (*p->p_sysent->sv_prepsyscall)(
1144 frame, (int *)(&args.nosys.sysmsg + 1),
1145 &code, &params);
1146 } else {
1148 * Need to check if this is a 32 bit or 64 bit syscall.
1149 * fuword is MP aware.
1151 if (code == SYS_syscall) {
1153 * Code is first argument, followed by actual args.
1155 code = fuword(params);
1156 params += sizeof(int);
1157 } else if (code == SYS___syscall) {
1159 * Like syscall, but code is a quad, so as to maintain
1160 * quad alignment for the rest of the arguments.
1162 code = fuword(params);
1163 params += sizeof(quad_t);
1167 code &= p->p_sysent->sv_mask;
1168 if (code >= p->p_sysent->sv_size)
1169 callp = &p->p_sysent->sv_table[0];
1170 else
1171 callp = &p->p_sysent->sv_table[code];
1173 narg = callp->sy_narg & SYF_ARGMASK;
1176 * copyin is MP aware, but the tracing code is not
1178 if (narg && params) {
1179 error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
1180 narg * sizeof(register_t));
1181 if (error) {
1182 #ifdef KTRACE
1183 if (KTRPOINT(td, KTR_SYSCALL)) {
1184 MAKEMPSAFE(have_mplock);
1186 ktrsyscall(lp, code, narg,
1187 (void *)(&args.nosys.sysmsg + 1));
1189 #endif
1190 goto bad;
1194 #ifdef KTRACE
1195 if (KTRPOINT(td, KTR_SYSCALL)) {
1196 MAKEMPSAFE(have_mplock);
1197 ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
1199 #endif
1202 * For traditional syscall code edx is left untouched when 32 bit
1203 * results are returned. Since edx is loaded from fds[1] when the
1204 * system call returns we pre-set it here.
1206 args.sysmsg_fds[0] = 0;
1207 args.sysmsg_fds[1] = frame->tf_edx;
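/*
 * Editor's illustrative sketch (not part of the original file): a system
 * call that returns two values, pipe() being the classic case, simply
 * fills in both sysmsg_fds slots from its implementation; the return path
 * below then copies fds[0] into %eax and fds[1] into %edx.  The variable
 * names in the sketch are hypothetical.
 */
#if 0
	/* inside a two-result syscall implementation */
	args.sysmsg_fds[0] = fd_read_end;	/* ends up in frame->tf_eax */
	args.sysmsg_fds[1] = fd_write_end;	/* ends up in frame->tf_edx */
#endif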
1210 * The syscall might manipulate the trap frame. If it does it
1211 * will probably return EJUSTRETURN.
1213 args.sysmsg_frame = frame;
1215 STOPEVENT(p, S_SCE, narg); /* MP aware */
1218 * NOTE: All system calls run MPSAFE now. The system call itself
1219 * is responsible for getting the MP lock.
1221 error = (*callp->sy_call)(&args);
1223 #if 0
1224 kprintf("system call %d returned %d\n", code, error);
1225 #endif
1227 out:
1229 * MP SAFE (we may or may not have the MP lock at this point)
1231 switch (error) {
1232 case 0:
1234 * Reinitialize proc pointer `p' as it may be different
1235 * if this is a child returning from fork syscall.
1237 p = curproc;
1238 lp = curthread->td_lwp;
1239 frame->tf_eax = args.sysmsg_fds[0];
1240 frame->tf_edx = args.sysmsg_fds[1];
1241 frame->tf_eflags &= ~PSL_C;
1242 break;
1243 case ERESTART:
1245 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
1246 * int 0x80 is 2 bytes. We saved this in tf_err.
1248 frame->tf_eip -= frame->tf_err;
1249 break;
1250 case EJUSTRETURN:
1251 break;
1252 case EASYNC:
1253 panic("Unexpected EASYNC return value (for now)");
1254 default:
1255 bad:
1256 if (p->p_sysent->sv_errsize) {
1257 if (error >= p->p_sysent->sv_errsize)
1258 error = -1; /* XXX */
1259 else
1260 error = p->p_sysent->sv_errtbl[error];
1262 frame->tf_eax = error;
1263 frame->tf_eflags |= PSL_C;
1264 break;
1268 * Traced syscall. trapsignal() is not MP aware.
1270 if ((orig_tf_eflags & PSL_T) /*&& !(orig_tf_eflags & PSL_VM)*/) {
1271 MAKEMPSAFE(have_mplock);
1272 frame->tf_eflags &= ~PSL_T;
1273 trapsignal(lp, SIGTRAP, TRAP_TRACE);
1277 * Handle reschedule and other end-of-syscall issues
1279 userret(lp, frame, sticks);
1281 #ifdef KTRACE
1282 if (KTRPOINT(td, KTR_SYSRET)) {
1283 MAKEMPSAFE(have_mplock);
1284 ktrsysret(lp, code, error, args.sysmsg_result);
1286 #endif
1289 * This works because errno is findable through the
1290 * register set. If we ever support an emulation where this
1291 * is not the case, this code will need to be revisited.
1293 STOPEVENT(p, S_SCX, code);
1295 userexit(lp);
1296 #ifdef SMP
1298 * Release the MP lock if we had to get it
1300 KASSERT(td->td_mpcount == have_mplock,
1301 ("badmpcount syscall2/end from %p", (void *)frame->tf_eip));
1302 if (have_mplock)
1303 rel_mplock();
1304 #endif
1305 KTR_LOG(kernentry_syscall_ret, lp->lwp_proc->p_pid, lp->lwp_tid, error);
1306 #ifdef INVARIANTS
1307 KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
1308 ("syscall: critical section count mismatch! %d/%d",
1309 crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
1310 #endif
1313 void
1314 fork_return(struct lwp *lp, struct trapframe *frame)
1316 frame->tf_eax = 0; /* Child returns zero */
1317 frame->tf_eflags &= ~PSL_C; /* success */
1318 frame->tf_edx = 1;
1320 generic_lwp_return(lp, frame);
1321 KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
1325 * Simplified back end of syscall(), used when returning from fork()
1326 * or lwp_create() directly into user mode. MP lock is held on entry and
1327 * should be released on return. This code will return back into the fork
1328 * trampoline code which then runs doreti.
1330 void
1331 generic_lwp_return(struct lwp *lp, struct trapframe *frame)
1333 struct proc *p = lp->lwp_proc;
1336 * Newly forked processes are given a kernel priority. We have to
1337 * adjust the priority to a normal user priority and fake entry
1338 * into the kernel (call userenter()) to install a passive release
1339 * function just in case userret() decides to stop the process. This
1340 * can occur when ^Z races a fork. If we do not install the passive
1341 * release function the current process designation will not be
1342 * released when the thread goes to sleep.
1344 lwkt_setpri_self(TDPRI_USER_NORM);
1345 userenter(lp->lwp_thread, p);
1346 userret(lp, frame, 0);
1347 #ifdef KTRACE
1348 if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
1349 ktrsysret(lp, SYS_fork, 0, 0);
1350 #endif
1351 p->p_flag |= P_PASSIVE_ACQ;
1352 userexit(lp);
1353 p->p_flag &= ~P_PASSIVE_ACQ;
1354 #ifdef SMP
1355 KKASSERT(lp->lwp_thread->td_mpcount == 1);
1356 rel_mplock();
1357 #endif
1361 * doreti has turned into this. The frame is directly on the stack. We
1362 * pull everything else we need (fpu and tls context) from the current
1363 * thread.
1365 * Note on fpu interactions: In a virtual kernel, the fpu context for
1366 * an emulated user mode process is not shared with the virtual kernel's
1367 * fpu context, so we only have to 'stack' fpu contexts within the virtual
1368 * kernel itself, and not even then since the signal() contexts that we care
1369 * about save and restore the FPU state (I think anyhow).
1371 * vmspace_ctl() returns an error only if it had problems installing the
1372 * context we supplied or problems copying data to/from our VM space.
1374 void
1375 go_user(struct intrframe *frame)
1377 struct trapframe *tf = (void *)&frame->if_gs;
1378 int r;
1381 * Interrupts may be disabled on entry, make sure all signals
1382 * can be received before beginning our loop.
1384 sigsetmask(0);
1387 * Switch to the current simulated user process, then call
1388 * user_trap() when we break out of it (usually due to a signal).
1390 for (;;) {
1392 * Tell the real kernel whether it is ok to use the FP
1393 * unit or not.
1395 * The critical section is required to prevent an interrupt
1396 * from causing a preemptive task switch and changing
1397 * the FP state.
1399 crit_enter();
1400 if (mdcpu->gd_npxthread == curthread) {
1401 tf->tf_xflags &= ~PGEX_FPFAULT;
1402 } else {
1403 tf->tf_xflags |= PGEX_FPFAULT;
1407 * Run emulated user process context. This call interlocks
1408 * with new mailbox signals.
1410 * Set PGEX_U unconditionally, indicating a user frame (the
1411 * bit is normally set only by T_PAGEFLT).
1413 r = vmspace_ctl(&curproc->p_vmspace->vm_pmap, VMSPACE_CTL_RUN,
1414 tf, &curthread->td_savevext);
1415 crit_exit();
1416 frame->if_xflags |= PGEX_U;
1417 #if 0
1418 kprintf("GO USER %d trap %d EVA %08x EIP %08x ESP %08x XFLAGS %02x/%02x\n",
1419 r, tf->tf_trapno, tf->tf_err, tf->tf_eip, tf->tf_esp,
1420 tf->tf_xflags, frame->if_xflags);
1421 #endif
1422 if (r < 0) {
1423 if (errno != EINTR)
1424 panic("vmspace_ctl failed error %d", errno);
1425 } else {
1426 if (tf->tf_trapno) {
1427 user_trap(tf);
1430 if (mycpu->gd_reqflags & RQF_AST_MASK) {
1431 tf->tf_trapno = T_ASTFLT;
1432 user_trap(tf);
1434 tf->tf_trapno = 0;
1439 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
1440 * fault (which is then passed back to the virtual kernel) if an attempt is
1441 * made to use the FP unit.
1443 * XXX this is a fairly big hack.
1445 void
1446 set_vkernel_fp(struct trapframe *frame)
1448 struct thread *td = curthread;
1450 if (frame->tf_xflags & PGEX_FPFAULT) {
1451 td->td_pcb->pcb_flags |= FP_VIRTFP;
1452 if (mdcpu->gd_npxthread == td)
1453 npxexit();
1454 } else {
1455 td->td_pcb->pcb_flags &= ~FP_VIRTFP;
1460 * Called from vkernel_trap() to fixup the vkernel's syscall
1461 * frame for vmspace_ctl() return.
1463 void
1464 cpu_vkernel_trap(struct trapframe *frame, int error)
1466 frame->tf_eax = error;
1467 if (error)
1468 frame->tf_eflags |= PSL_C;
1469 else
1470 frame->tf_eflags &= ~PSL_C;