/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.104 2007/04/29 18:25:36 dillon Exp $
 */
/*
 * 386 Trap and System call handling
 */
#include "opt_ktrace.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#include <sys/ktrace.h>
#include <sys/upcall.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>

#include <machine_base/isa/intr_machdep.h>

#include <sys/syslog.h>
#include <machine/clock.h>

#include <machine/vm86.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#ifdef SMP
#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}
#else
#define MAKEMPSAFE(have_mplock)
#endif
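/*
 * Usage note (sketch): MAKEMPSAFE(have_mplock) is placed in front of any
 * code path below that still requires the MP lock (the BGL).  The
 * have_mplock flag records whether this particular entry obtained the
 * lock, so it is acquired at most once and can be released exactly once
 * on the way out; the KASSERTs against td_mpcount near the end of trap()
 * and syscall2() check that pairing.
 */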
int (*pmath_emulate) (struct trapframe *);

extern void trap (struct trapframe *frame);
extern int trapwrite (unsigned addr);
extern void syscall2 (struct trapframe *frame);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);
#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};
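/*
 * Note: trap_msg[] is indexed directly by the trap type.  trap_fatal()
 * and the fatal-process-exception report below print trap_msg[type] for
 * any type <= MAX_TRAP_MSG; the empty slots correspond to unused type
 * codes.
 */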
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");
static int syscall_mpsafe = 0;
SYSCTL_INT(_kern, OID_AUTO, syscall_mpsafe, CTLFLAG_RW,
	&syscall_mpsafe, 0, "Allow MPSAFE marked syscalls to run without BGL");
TUNABLE_INT("kern.syscall_mpsafe", &syscall_mpsafe);
static int trap_mpsafe = 0;
SYSCTL_INT(_kern, OID_AUTO, trap_mpsafe, CTLFLAG_RW,
	&trap_mpsafe, 0, "Allow traps to mostly run without the BGL");
TUNABLE_INT("kern.trap_mpsafe", &trap_mpsafe);
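/*
 * Note (illustrative): kern.syscall_mpsafe and kern.trap_mpsafe are both
 * loader tunables and read-write sysctls, so they can be preset from
 * /boot/loader.conf (e.g. kern.syscall_mpsafe="1") or changed at run time
 * with sysctl(8) (e.g. sysctl kern.trap_mpsafe=1).
 */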
MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;
/*
 * Passive USER->KERNEL transition.  This only occurs if we block in the
 * kernel while still holding our userland priority.  We have to fixup our
 * priority in order to avoid potential deadlocks before we allow the system
 * to switch us to another thread.
 */
static void
passive_release(struct thread *td)
{
	struct lwp *lp = td->td_lwp;

	td->td_release = NULL;
	lwkt_setpri_self(TDPRI_KERN_USER);
	lp->lwp_proc->p_usched->release_curproc(lp);
}
/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */
static void
userenter(struct thread *curtd)
{
	curtd->td_release = passive_release;
}
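/*
 * Lifecycle sketch (illustrative, based on the comments in this file):
 * each user->kernel entry pairs userenter() with userexit(), with
 * userret() handling end-of-trap/syscall work before the return:
 *
 *	userenter(td);			install passive_release
 *	... service the trap/syscall ...if we block, passive_release()
 *					drops us to a kernel priority
 *	userret(lp, frame, sticks);	signals, upcalls, profiling, stops
 *	userexit(lp);			reacquire curproc, restore priority
 */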
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;
	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
			    (u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_flag & LWP_WEXIT)

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP) {

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flag & P_UPCALLPEND) {
		p->p_flag &= ~P_UPCALLPEND;

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 */
	if ((sig = CURSIG(lp)) != 0) {

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		p->p_flag |= P_SWAPWAIT;
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;
	}
/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	globaldata_t gd = td->td_gd;

	/*
	 * If a user reschedule is requested force a new process to be
	 * chosen by releasing the current process.  Our process will only
	 * be chosen again if it has a considerably better priority.
	 */
	if (user_resched_wanted())
		lp->lwp_proc->p_usched->release_curproc(lp);

	/*
	 * Handle a LWKT reschedule request first.  Since our passive release
	 * is still in place we do not have to do anything special.
	 */
	if (lwkt_resched_wanted())
		lwkt_switch();

	/*
	 * Acquire the current process designation for this user scheduler
	 * on this cpu.  This will also handle any user-reschedule requests.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* We may have switched cpus on acquisition */

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;

	/*
	 * After reducing our priority there might be other kernel-level
	 * LWKTs that now have a greater priority.  Run them as necessary.
	 * We don't have to worry about losing cpu to userland because
	 * we still control the current-process designation and we no longer
	 * have a passive release function installed.
	 */
	if (lwkt_checkpri_self())
		lwkt_switch();
}
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
	int have_mplock = 0;
	int crit_count = td->td_pri & ~TDPRI_MASK;
	vm_offset_t eva;

	if (db_active) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
	}
	++gd->gd_trap_nesting_level;
	if (frame->tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
	}

	if (trap_mpsafe == 0)
		MAKEMPSAFE(have_mplock);

	--gd->gd_trap_nesting_level;
	if (!(frame->tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL ||
		    (frame->tf_eflags & PSL_VM)) {
			MAKEMPSAFE(have_mplock);
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_BPTFLT && type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
	}
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame->tf_trapno;
	code = frame->tf_err;

	ASSERT_MP_LOCK_HELD(curthread);
	if (frame->tf_eflags & PSL_VM &&
	    (type == T_PROTFLT || type == T_STKFLT)) {
		KKASSERT(td->td_mpcount > 0);
		i = vm86_emulate((struct vm86frame *)frame);
		KKASSERT(td->td_mpcount > 0);
		if (i != 0) {
			/*
			 * returns to original process
			 */
			vm86_trap((struct vm86frame *)frame,
				  have_mplock);
		} else {
			vm86_trap((struct vm86frame *)frame, 0);
		}
		KKASSERT(0); /* NOT REACHED */
	}
		/*
		 * these traps want either a process context, or
		 * assume a normal userspace trap.
		 */
			trap_fatal(frame, eva);

			type = T_BPTFLT;	/* kernel breakpoint */

		goto kernel_trap;	/* normal kernel trap handling */

	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_eflags &= ~PSL_T;

		case T_ARITHTRAP:	/* arithmetic trap */

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
							   RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame->tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)frame);
			}

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
			ucode = code + BUS_SEGM_FAULT;
		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			i = trap_pfault(frame, TRUE, eva);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2)
				goto restart;
#endif

		case T_DIVIDE:		/* integer divide fault */

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				kprintf ("NMI ... going to debugger\n");
				kdb_trap (type, 0, frame);
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		case T_OFLOW:		/* integer overflow fault */

		case T_BOUND:		/* bounds check fault */

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (p->p_vkernel && p->p_vkernel->vk_current &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)) {
			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (!pmath_emulate) {
				ucode = FPE_FPU_NP_TRAP;
			}
			i = (*pmath_emulate)(frame);
			if (!(frame->tf_eflags & PSL_T))
				goto out2;
			frame->tf_eflags &= ~PSL_T;
			/* else ucode = emulator_only_knows() XXX */
		case T_FPOPFLT:		/* FPU operand fetch fault */

		case T_XMMFLT:		/* SIMD floating-point exception */

		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			trap_pfault(frame, FALSE, eva);
			/*
			 * The kernel may be using npx for copying or other
			 * purposes.
			 */

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define	MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame->tf_eip == (int)where) {			\
			frame->tf_eip = (int)whereto;			\
			goto out2;					\
		}							\
	} while (0)
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a trap.
				 */
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				MAYBE_DORETI_FAULT(doreti_popl_gs,
						   doreti_popl_gs_fault);
				if (td->td_pcb->pcb_onfault) {
					frame->tf_eip =
					    (register_t)td->td_pcb->pcb_onfault;
				}
			}
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_eflags & PSL_NT) {
				frame->tf_eflags &= ~PSL_NT;
				goto out2;
			}
		case T_TRCTRAP:		/* trace trap */
			if (frame->tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_eflags &= ~PSL_T;
				goto out2;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address)
			 */
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			MAKEMPSAFE(have_mplock);
			if (kdb_trap (type, 0, frame))
				goto out2;
#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#  define TIMER_FREQ	1193182
#endif
	handle_powerfail:
		{
			static unsigned lastalert = 0;

			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
		}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				kprintf ("NMI ... going to debugger\n");
				kdb_trap (type, 0, frame);
			} else if (panic_on_nmi == 0)
				goto out2;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
			MAKEMPSAFE(have_mplock);
			trap_fatal(frame, eva);
	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (p->p_vkernel && p->p_vkernel->vk_current) {
		vkernel_trap(p, frame);
		goto out;
	}
	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
	}
	if (ISPL(frame->tf_cs) == SEL_UPL)
		KASSERT(td->td_mpcount == have_mplock,
			("badmpcount trap/end from %p", (void *)frame->tf_eip));
	userret(lp, frame, sticks);

	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
}
static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct proc *p = td->td_proc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception:  if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;
			return -2;
		}
#endif
	/*
	 * This is a fault on non-kernel virtual memory.
	 * vm is initialized above to NULL.  If curproc is NULL
	 * or curproc->p_vmspace is NULL the fault is fatal.
	 */

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_xflags = frame->tf_err;
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
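/*
 * Note on the return convention (as used by trap() above): trap_pfault()
 * hands back a signal number for unresolved user faults.  A protection
 * failure maps to SIGBUS and any other failure to SIGSEGV, which trap()
 * then delivers via trapsignal().
 */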
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd,
		&softseg);
	if (type <= MAX_TRAP_MSG)
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic.id);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = 0x%x\n", eva);
		kprintf("fault code = %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer = 0x%x:0x%x\n",
		frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	kprintf("stack pointer = 0x%x:0x%x\n", ss, esp);
	kprintf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
	kprintf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf(" = DPL %d, pres %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
		softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_eflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		kprintf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		kprintf("vm86, ");
	kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	kprintf("%lu (%s)\n",
	    (u_long)curproc->p_pid, curproc->p_comm ?
	    curproc->p_comm : "");

	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		kprintf("(CRIT)");
	/*
	 * we probably SHOULD have stopped the other CPUs before now!
	 * another CPU COULD have been touching cpl at this moment...
	 */
	kprintf(" <- SMP: XXX");

	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;

	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	kprintf("\nFatal double fault:\n");
	kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);

	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic.id);

	panic("double fault");
}
/*
 * Compensate for 386 brain damage (missing URKR).
 * (On the 386, supervisor-mode writes do not honor page-level write
 * protection, so the write fault has to be simulated here before the
 * kernel touches the user page.)
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int
trapwrite(unsigned addr)
{
	struct proc *p;
	struct vmspace *vm;
	vm_offset_t va;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAX_USER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	if (!grow_stack (p, va)) {
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	if (rv != KERN_SUCCESS)
		return (1);

	return (0);
}
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 *
 * MPSAFE - note that large sections of this routine are run without
 *	    the MP lock.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
	int crit_count = td->td_pri & ~TDPRI_MASK;
	int have_mplock = 0;
	u_int code;
	union sysunion args;
	if (ISPL(frame->tf_cs) != SEL_UPL) {

	KASSERT(td->td_mpcount == 0,
		("badmpcount syscall2 from %p", (void *)frame->tf_eip));

	if (syscall_mpsafe == 0)
		MAKEMPSAFE(have_mplock);

	userenter(td);		/* lazy raise our priority */
	sticks = (int)td->td_sticks;

	orig_tf_eflags = frame->tf_eflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (p->p_vkernel && p->p_vkernel->vk_current) {
		error = vkernel_trap(p, frame);
		frame->tf_eax = error;
		if (error)
			frame->tf_eflags |= PSL_C;
		error = EJUSTRETURN;
		goto out;
	}
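/*
 * Illustrative sketch of the user stack layout assumed by the parameter
 * fetch below (int 0x80 or lcall entry):
 *
 *	frame->tf_esp + 0		return address pushed by the libc stub
 *	frame->tf_esp + sizeof(int)	first syscall argument (params)
 *	...				remaining arguments
 *
 * For SYS_syscall the real syscall number is the first argument; for
 * SYS___syscall it is a quad so the remaining arguments stay quad aligned.
 */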
	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	code &= p->p_sysent->sv_mask;
	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
				narg * sizeof(register_t));
		if (error) {
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);
				ktrsyscall(p, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
		}
	}

	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(p, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}

	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * the syscall is not marked MPSAFE.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		MAKEMPSAFE(have_mplock);

	error = (*callp->sy_call)(&args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */

	/*
	 * Reinitialize proc pointer `p' as it may be different
	 * if this is a child returning from fork syscall.
	 */
	lp = curthread->td_lwp;
	frame->tf_eax = args.sysmsg_fds[0];
	frame->tf_edx = args.sysmsg_fds[1];
	frame->tf_eflags &= ~PSL_C;

	/*
	 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
	 * int 0x80 is 2 bytes.  We saved this in tf_err.
	 */
	frame->tf_eip -= frame->tf_err;

	panic("Unexpected EASYNC return value (for now)");

	if (p->p_sysent->sv_errsize) {
		if (error >= p->p_sysent->sv_errsize)
			error = -1;	/* XXX */
		else
			error = p->p_sysent->sv_errtbl[error];
	}
	frame->tf_eax = error;
	frame->tf_eflags |= PSL_C;

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(p, code, error, args.sysmsg_result);
	}

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p", (void *)frame->tf_eip));
	if (have_mplock)
		rel_mplock();

	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
}

void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_eax = 0;		/* Child returns zero */
	frame->tf_eflags &= ~PSL_C;	/* success */

	generic_lwp_return(lp, frame);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread);
	userret(lp, frame, 0);
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(p, SYS_fork, 0, 0);
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
	KKASSERT(lp->lwp_thread->td_mpcount == 1);
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}
;