/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.115 2008/09/09 04:06:17 dillon Exp $
 */

/*
 * 386 Trap and System call handling
 */
#include "opt_ktrace.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#include <sys/ktrace.h>
#include <sys/upcall.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>

#include <machine_base/isa/intr_machdep.h>

#include <sys/syslog.h>
#include <machine/clock.h>
#include <machine/vm86.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>
#ifdef SMP
#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}
#else
#define MAKEMPSAFE(have_mplock)
#endif
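/*
 * Usage sketch: handlers such as trap() and syscall2() below keep a local
 * have_mplock flag, call MAKEMPSAFE(have_mplock) before any code that is
 * not MP safe (kprintf, trap_fatal, the ktrace hooks), and on the way out
 * release the lock only if the flag shows it was taken:
 *
 *	int have_mplock = 0;
 *	...
 *	MAKEMPSAFE(have_mplock);	-- first call acquires the MP lock
 *	...
 *	if (have_mplock)		-- paired release at handler exit
 *		rel_mplock();
 *
 * The flag guarantees at most one get_mplock()/rel_mplock() pair per pass.
 */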
int (*pmath_emulate) (struct trapframe *);

extern void trap (struct trapframe *frame);
extern void syscall2 (struct trapframe *frame);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);
#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;
/*
 * Passively intercepts the thread switch function to increase the thread
 * priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		ncred = crhold(curp->p_ucred);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}
}
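/*
 * How the passive release pays off: if the thread never blocks before
 * returning to usermode, td_release is simply cleared again by
 * lwkt_passive_recover() in userexit() and no priority round-trip ever
 * happens.  Only if lwkt_switch() actually switches away does it invoke
 * td_release (lwkt_passive_release), which hands back the current-process
 * designation and demotes the thread to a user priority.
 */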
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64-bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	if (p->p_userret != NULL) {
		(*p->p_userret)(frame);
		p->p_userret = NULL;
	}

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_flag & LWP_WEXIT) {
		get_mplock();
		lwp_exit(0);
		rel_mplock(); /* NOT REACHED */
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP) {
		get_mplock();
		tstop();
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flag & P_UPCALLPEND) {
		p->p_flag &= ~P_UPCALLPEND;
		get_mplock();
		postupcall(lp);
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		get_mplock();
		postsig(sig);
		rel_mplock();
		goto recheck;
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		get_mplock();
		p->p_flag |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;
		rel_mplock();
		goto recheck;
	}

	/*
	 * Make sure postsig() handled request to restore old signal mask
	 * after running signal handler.
	 */
	KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
}
/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP) {
		get_mplock();
		tstop();
		rel_mplock();
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */
}
#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "pid=%d, tid=%d, trapno=%d, eva=%p",
	 sizeof(int) + sizeof(int) + sizeof(int) + sizeof(vm_offset_t));
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "pid=%d, tid=%d",
	 sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "pid=%d, tid=%d, call=%d",
	 sizeof(int) + sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "pid=%d, tid=%d, err=%d",
	 sizeof(int) + sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "pid=%d, tid=%d",
	 sizeof(int) + sizeof(int));
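/*
 * These probes are fired with KTR_LOG() at the kernel entry points below,
 * e.g. on a user-mode trap:
 *
 *	KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
 *		frame->tf_trapno, eva);
 *
 * The sizeof() sums passed above size each probe's logged payload to
 * match its format string.
 */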
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
#ifdef DDB
	if (db_active) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;
	++gd->gd_trap_nesting_level;
	if (frame->tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_FOOF_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
		cpu_enable_intr();
	}
	--gd->gd_trap_nesting_level;
	if (!(frame->tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs)==SEL_UPL || (frame->tf_eflags & PSL_VM)) {
			MAKEMPSAFE(have_mplock);
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_BPTFLT && type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame->tf_trapno;
	code = frame->tf_err;
	if (in_vm86call) {
		if (frame->tf_eflags & PSL_VM &&
		    (type == T_PROTFLT || type == T_STKFLT)) {
#ifdef SMP
			KKASSERT(td->td_mpcount > 0);
#endif
			i = vm86_emulate((struct vm86frame *)frame);
#ifdef SMP
			KKASSERT(td->td_mpcount > 0);
#endif
			if (i != 0) {
				/*
				 * returns to original process
				 */
#ifdef SMP
				vm86_trap((struct vm86frame *)frame,
					  have_mplock);
#else
				vm86_trap((struct vm86frame *)frame, 0);
#endif
				KKASSERT(0); /* NOT REACHED */
			}
			goto out2;
		}
		switch (type) {
			/*
			 * these traps want either a process context, or
			 * assume a normal userspace trap.
			 */
		case T_PROTFLT:
		case T_SEGNPFLT:
			trap_fatal(frame, eva);
			goto out2;
		case T_TRCTRAP:
			type = T_BPTFLT;	/* kernel breakpoint */
			/* FALL THROUGH */
		}
		goto kernel_trap;	/* normal kernel trap handling */
	}
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = ILL_PRVOPC;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_eflags &= ~PSL_T;
			ucode = TRAP_TRACE;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
					    RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;
			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame->tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)frame);
				if (i == 0)
					goto out;
				break;
			}
			i = SIGBUS;
			ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
			break;
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = code + BUS_SEGM_FAULT; /* XXX: ???*/
			break;
		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			i = trap_pfault(frame, TRUE, eva);
			if (i == -1)
				goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2)
				goto restart;
#endif
			if (i == 0)
				goto out;

			if (i == SIGSEGV)
				ucode = SEGV_MAPERR;
			else
				ucode = BUS_ADRERR; /* XXX */
			break;
		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

#if NNPX > 0
			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna())
				goto out;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(frame);
			if (i == 0) {
				if (!(frame->tf_eflags & PSL_T))
					goto out2;
				frame->tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
kernel_trap:
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			trap_pfault(frame, FALSE, eva);
			goto out2;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel may be using npx for copying or other
			 * purposes.
			 */
			if (npxdna())
				goto out2;
#endif
			break;
		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define	MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame->tf_eip == (int)where) {			\
			frame->tf_eip = (int)whereto;			\
			goto out2;					\
		}							\
	} while (0)
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				MAYBE_DORETI_FAULT(doreti_popl_gs,
						   doreti_popl_gs_fault);
				if (td->td_pcb->pcb_onfault) {
					frame->tf_eip =
					    (register_t)td->td_pcb->pcb_onfault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_eflags & PSL_NT) {
				frame->tf_eflags &= ~PSL_NT;
				goto out2;
			}
			break;
		case T_TRCTRAP:	 /* trace trap */
			if (frame->tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_eflags &= ~PSL_T;
				goto out2;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap (type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#  define TIMER_FREQ	1193182
#endif
handle_powerfail:
		{
			static unsigned lastalert = 0;

			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			goto out2;
		}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		goto out2;
	}
	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);
#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif
out:
#ifdef SMP
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		KASSERT(td->td_mpcount == have_mplock,
			("badmpcount trap/end from %p", (void *)frame->tf_eip));
	}
#endif
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %zd/%zd",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}
static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception:  if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;
			return (-2);
		}
#endif
		if (usermode)
			goto nogo;

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);
		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_xflags = frame->tf_err;
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
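/*
 * For reference, the hardware page-fault error code tested above via
 * frame->tf_err:  PGEX_P (0x01) is set when the fault was a protection
 * violation on a present page, PGEX_W (0x02) when the access was a write,
 * and PGEX_U (0x04) when it originated in user mode.  trap_pfault() only
 * needs PGEX_W, to choose VM_PROT_WRITE vs VM_PROT_READ for vm_fault();
 * trap_fatal() below decodes all three for its diagnostic printout.
 */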
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);
	if (type <= MAX_TRAP_MSG)
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= %p\n", (void *)eva);
		kprintf("fault code		= %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%x:0x%x\n",
		frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	kprintf("stack pointer		= 0x%x:0x%x\n", ss, esp);
	kprintf("frame pointer		= 0x%x:0x%x\n", ss, frame->tf_ebp);
	kprintf("code segment		= base 0x%x, limit 0x%x, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("			= DPL %d, pres %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
		softseg.ssd_gran);
	kprintf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		kprintf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		kprintf("vm86, ");
	kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread		= pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");
#ifdef SMP
/**
 *  XXX FIXME:
 *	we probably SHOULD have stopped the other CPUs before now!
 *	another CPU COULD have been touching cpl at this moment...
 */
	kprintf(" <- SMP: XXX");
#endif
	kprintf("\n");
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred. The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	kprintf("\nFatal double fault:\n");
	kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}
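/*
 * Background note: on i386 the double fault is vectored through a
 * separate task gate, so the faulting context's registers are preserved
 * by the hardware task switch in the outgoing (common) TSS rather than
 * in a trapframe - which is why the handler above reports eip/esp/ebp
 * from gd->gd_common_tss instead of taking a trapframe argument.
 */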
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap.  The MP lock is not
 * held on entry or return.  We are responsible for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
#ifdef SMP
	int have_mplock = 0;
#endif
	u_int code;
	union sysunion args;
#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_eax);

#ifdef SMP
	KASSERT(td->td_mpcount == 0,
		("badmpcount syscall2 from %p", (void *)frame->tf_eip));
#endif
	userenter(td, p);	/* lazy raise our priority */

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_eflags = frame->tf_eflags;
	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		goto out;
	}
	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}
	code &= p->p_sysent->sv_mask;
	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

#if 0
	if (p->p_sysent->sv_name[0] == 'L')
		kprintf("Linux syscall, code = %d\n", code);
#endif
	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
				narg * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(lp, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif
	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;

	/*
	 * The syscall might manipulate the trap frame. If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);
out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_eax = args.sysmsg_fds[0];
		frame->tf_edx = args.sysmsg_fds[1];
		frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes. We saved this in tf_err.
		 */
		frame->tf_eip -= frame->tf_err;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_eax = error;
		frame->tf_eflags |= PSL_C;
		break;
	}
	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p callp %p",
		(void *)frame->tf_eip, callp));
	if (have_mplock)
		rel_mplock();
#endif
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %zd",
		td->td_toks_stop - &td->td_toks_base));
#endif
}
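/*
 * Userland view of the convention syscall2() decodes (illustrative only):
 * the syscall number arrives in %eax and the arguments sit on the user
 * stack just above the return-address slot, which is why params is taken
 * from tf_esp + sizeof(int) above.  A hand-rolled write(2) might look
 * like:
 *
 *	pushl	$len
 *	pushl	$buf
 *	pushl	$fd
 *	pushl	$0		# dummy return-address slot
 *	movl	$SYS_write,%eax
 *	int	$0x80
 *
 * On return %eax carries the result (or the errno, with PSL_C set to
 * flag the error) and %edx carries sysmsg_fds[1], matching the switch on
 * error above.
 */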
/*
 * NOTE: MP lock not held at any point.
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_eax = 0;		/* Child returns zero */
	frame->tf_eflags &= ~PSL_C;	/* success */
	frame->tf_edx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
}
/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}
/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_eax = error;
	if (error)
		frame->tf_eflags |= PSL_C;
	else
		frame->tf_eflags &= ~PSL_C;
}