/*
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * x86_64 Trap and System call handling
 */

#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/ktrace.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/thread.h>
#include <machine/clock.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine_base/isa/isa_intr.h>
#include <machine_base/apic/lapic.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

extern void trap(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);

#define	MAX_TRAP_MSG	30
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
};

static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int ddb_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW,
	&ddb_on_seg_fault, 0, "Go to DDB on user seg-fault");
static int freeze_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, freeze_on_seg_fault, CTLFLAG_RW,
	&freeze_on_seg_fault, 0, "Freeze the process on user seg-fault");
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

/*
 * System call debugging records the worst-case system call
 * overhead (inclusive of blocking), but may be inaccurate.
 */
/*#define SYSCALL_DEBUG*/
#ifdef SYSCALL_DEBUG
uint64_t SysCallsWorstCase[SYS_MAXSYSCALL];
#endif

/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 *
 * To avoid races with another thread updating p_ucred we obtain p_spin.
 * The other thread doing the update will obtain both p_token and p_spin.
 * In the case where the cached cred pointer matches, we will already have
 * the ref and we don't have to do one blessed thing.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		spin_lock(&curp->p_spin);
		ncred = crhold(curp->p_ucred);
		spin_unlock(&curp->p_spin);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}

	/*
	 * Debugging, remove top two user stack pages to catch kernel faults
	 */
	if (freeze_on_seg_fault > 1 && curtd->td_lwp) {
		pmap_remove(vmspace_pmap(curtd->td_lwp->lwp_vmspace),
			    0x00007FFFFFFFD000LU,
			    0x0000800000000000LU);
	}
}
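
/*
 * (Informal sketch of the passive-release optimization: td_release is
 * invoked only if the thread actually switches away.  In the common
 * case where a syscall or trap returns without blocking, the priority
 * is never raised at all, and lwkt_passive_recover() in userexit()
 * simply removes the hook again.)
 */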

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64-bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;
	int ptok;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flags & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
			    (u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

	/*
	 * Specific on-return-to-usermode checks (LWP_MP_WEXIT,
	 * LWP_MP_VNLRU, etc).
	 */
	if (lp->lwp_mpflags & LWP_MP_URETMASK)
		lwpuserret(lp);

	/*
	 * Block here if we are in a stopped state.
	 */
	if (STOPLWP(p, lp)) {
		lwkt_gettoken(&p->p_token);
		tstop();
		lwkt_reltoken(&p->p_token);
	}
	while (dump_stop_usertds) {
		tsleep(&dump_stop_usertds, 0, "dumpstp", 0);
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flags & (P_SIGVTALRM | P_SIGPROF)) {
		lwkt_gettoken(&p->p_token);
		if (p->p_flags & P_SIGVTALRM) {
			p->p_flags &= ~P_SIGVTALRM;
			ksignal(p, SIGVTALRM);
		}
		if (p->p_flags & P_SIGPROF) {
			p->p_flags &= ~P_SIGPROF;
			ksignal(p, SIGPROF);
		}
		lwkt_reltoken(&p->p_token);
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_LCK_TRACE(lp, &ptok)) != 0) {
		postsig(sig, ptok);
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flags & P_SWAPPEDOUT) {
		lwkt_gettoken(&p->p_token);
		p->p_flags |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flags & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flags &= ~P_SWAPWAIT;
		lwkt_reltoken(&p->p_token);
	}

	/*
	 * In a multi-threaded program it is possible for a thread to change
	 * signal state during a system call which temporarily changes the
	 * signal mask.  In this case postsig() might not be run and we
	 * have to restore the mask ourselves.
	 */
	if (lp->lwp_flags & LWP_OLDMASK) {
		lp->lwp_flags &= ~LWP_OLDMASK;
		lp->lwp_sigmask = lp->lwp_oldsigmask;
	}
}

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (STOPLWP(lp->lwp_proc, lp)) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		tstop();
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
}

#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0,
	 "TRAP(pid %d, tid %d, trapno %ld, eva %lu)",
	 pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva);
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %d, tid %d, nr %ld)",
	 pid_t pid, lwpid_t tid, register_t trapno);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "SYSRET(pid %d, tid %d, err %d)",
	 pid_t pid, lwpid_t tid, int err);
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);
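
/*
 * (Illustrative note: these probes are fired via KTR_LOG(), e.g. the
 * KTR_LOG(kernentry_trap, ...) call in trap() below; with KTR compiled
 * into the kernel the recorded events can be examined with ktrdump(8).)
 */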

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to potentially
 *	  blocking, including blocking on any token.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.
 */
void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
	vm_offset_t eva;
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;

	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
		++gd->gd_trap_nesting_level;
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}

	/*
	 * Retrieve the fault address now, before anything can block
	 * (see the NOTE above).
	 */
	eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_NMI && type != T_BPTFLT &&
			   type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			kprintf("kernel trap %d (%s @ 0x%016jx) with "
				"interrupts disabled\n",
				type, td->td_comm, frame->tf_rip);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		KASSERT(lp->lwp_md.md_regs == frame,
			("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
						 RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_STKFLT:		/* stack fault */
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;

		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE);
			if (frame->tf_rip == 0) {
				/* used for kernel debugging only */
				while (freeze_on_seg_fault)
					tsleep(p, 0, "freeze", hz * 20);
			}
			if (i == -1 || i == 0)
				goto out;
			ucode = T_PAGEFLT;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* NISA > 0 */
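
			/*
			 * (Note: the machdep.ddb_on_nmi and
			 * machdep.panic_on_nmi sysctls above govern this
			 * path: an unclaimed NMI can drop into the
			 * debugger, panic, or be ignored.)
			 */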

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna())
				goto out;
			ucode = FPE_FPU_NP_TRAP;
			i = SIGFPE;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out2;
			break;

		case T_STKFLT:		/* stack fault */
		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * NOTE: in 64-bit mode traps push rsp/ss
				 *	 even if no ring change occurs.
				 */
				if (td->td_pcb->pcb_onfault &&
				    td->td_pcb->pcb_onfault_sp ==
				    frame->tf_rsp) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip = (long)doreti_iret_fault;
					goto out2;
				}
			}

			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
#if 0
				/* do we need this? */
				if (frame->tf_rip == (long)doreti_iret)
					frame->tf_rip = (long)doreti_iret_fault;
#endif
				goto out2;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
#if 0
			if (frame->tf_rip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_rip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_rflags &= ~PSL_T;
				goto out2;
			}
#endif

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't.
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
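				/*
				 * (For reference: the low four bits of
				 * %dr6 are the B0-B3 breakpoint-condition
				 * flags; the CPU does not clear them
				 * itself, hence the manual mask above.)
				 */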
				goto out2;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* NISA > 0 */
		}
		trap_fatal(frame, 0);
		goto out2;
	}

	/*
	 * Fault from user mode, virtual kernel intercept.
	 *
	 * If the fault is directly related to a VM context managed by a
	 * virtual kernel then let the virtual kernel handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
out2:	;
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %ld/%ld",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
}

void
trap_handle_userenter(struct thread *td)
{
	userenter(td, td->td_proc);
}

void
trap_handle_userexit(struct trapframe *frame, int sticks)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		userret(lp, frame, sticks);
		userexit(lp);
	}
}

static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;
		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL)
			goto nogo;
		map = &vm->vm_map;
	}

	/*
	 * Debugging, try to catch kernel faults on the user address
	 * space when not inside an onfault (e.g. copyin/copyout)
	 * routine.
	 */
	if (usermode == 0 && (td->td_pcb == NULL ||
	    td->td_pcb->pcb_onfault == NULL)) {
#ifdef DDB
		if (freeze_on_seg_fault) {
			kprintf("trap_pfault: user address fault from kernel mode "
				"%016lx\n", (long)frame->tf_addr);
			while (freeze_on_seg_fault)
				tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20);
		}
#endif
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else if (frame->tf_err & PGEX_I)
		ftype = VM_PROT_EXECUTE;
	else
		ftype = VM_PROT_READ;
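
	/*
	 * (For reference: in the x86 page-fault error code, bit 0 (PGEX_P)
	 * is set when a present page caused a protection violation, bit 1
	 * (PGEX_W) for a write access, bit 2 (PGEX_U) for a user-mode
	 * access, and bit 4 (PGEX_I) for an instruction fetch.)
	 */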

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST | VM_FAULT_USERMODE;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss
		 *	 even if no ring change occurs.
		 */
		if (td->td_pcb->pcb_onfault &&
		    td->td_pcb->pcb_onfault_sp == frame->tf_rsp &&
		    td->td_gd->gd_intr_nesting_level == 0) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	p = td->td_proc;
	if (td->td_lwp->lwp_vkernel == NULL) {
#ifdef DDB
		while (freeze_on_seg_fault) {
			tsleep(p, 0, "freeze", hz * 20);
		}
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
#endif
	}

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
		ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= 0x%lx\n", eva);
		kprintf("fault code		= %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%lx:0x%lx\n",
		frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss even if no ring
		 *	 change occurs.
		 */
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = frame->tf_rsp;
	}
	kprintf("stack pointer	        = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer	        = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment		= base 0x%lx, limit 0x%lx, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("			= DPL %d, pres %d, long %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
		softseg.ssd_gran);
	kprintf("processor eflags	= ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu\n",
			(u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread          = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 */
static __inline int
in_kstack_guard(register_t rptr)
{
	thread_t td = curthread;

	if ((char *)rptr >= td->td_kstack &&
	    (char *)rptr < td->td_kstack + PAGE_SIZE) {
		return 1;
	}
	return 0;
}
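
/*
 * (Informal note: the first PAGE_SIZE bytes of td_kstack serve as a
 * guard region; an %rsp or %rbp landing in it means the kernel stack
 * overflowed, which is exactly what dblfault_handler() tests for
 * below.)
 */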

void
dblfault_handler(struct trapframe *frame)
{
	thread_t td = curthread;

	if (in_kstack_guard(frame->tf_rsp) || in_kstack_guard(frame->tf_rbp)) {
		kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
		if (in_kstack_guard(frame->tf_rsp))
			frame->tf_rsp = (register_t)(td->td_kstack + PAGE_SIZE);
		if (in_kstack_guard(frame->tf_rbp))
			frame->tf_rbp = (register_t)(td->td_kstack + PAGE_SIZE);
	} else {
		kprintf("DOUBLE FAULT\n");
	}
	kprintf("\nFatal double fault\n");
	kprintf("rip = 0x%lx\n", frame->tf_rip);
	kprintf("rsp = 0x%lx\n", frame->tf_rsp);
	kprintf("rbp = 0x%lx\n", frame->tf_rbp);
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
	panic("double fault");
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int sticks;
	int error;
	int narg;
	int crit_count = td->td_critcount;
	register_t *argp;
	u_int code;
	int regcnt = 6;		/* number of args passed in registers */
	union sysunion args;
	register_t *argsdst;
	caddr_t params;

	mycpu->gd_cnt.v_syscall++;

#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_rax);

	userenter(td, p);	/* lazy raise our priority */

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		callp = NULL;
		code = 0;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	KASSERT(lp->lwp_md.md_regs == frame,
		("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));
	params = (caddr_t)frame->tf_rsp + sizeof(register_t);
	code = frame->tf_rax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		if (code == SYS_syscall || code == SYS___syscall) {
			/*
			 * Indirect system call: the actual syscall number
			 * is the first argument; the real arguments shift
			 * down by one register.
			 */
			code = frame->tf_rdi;
			regcnt--;
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * On x86_64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argp = &frame->tf_rdi;
	argp += 6 - regcnt;	/* skip the code word of an indirect syscall */
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);

	/*
	 * JG can we overflow the space pointed to by 'argsdst'
	 * either with 'bcopy' or with 'copyin'?
	 */
	bcopy(argp, argsdst, sizeof(register_t) * regcnt);
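
	/*
	 * (For reference: the userland SYSCALL instruction clobbers %rcx
	 * with the return %rip, so the C ABI's fourth argument travels in
	 * %r10; the assembly entry stub is assumed to have stored it into
	 * tf_rcx so the frame members line up with the calling convention.
	 * See the %r10 restore in the ERESTART case below.)
	 */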
	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg > regcnt) {
		KASSERT(params != NULL, ("copyin args with no params!"));
		error = copyin(params, &argsdst[regcnt],
			       (narg - regcnt) * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				ktrsyscall(lp, code, narg,
					   (void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
#ifdef SYSCALL_DEBUG
	tsc_uclock_t tscval = rdtsc();
#endif
	error = (*callp->sy_call)(&args);
#ifdef SYSCALL_DEBUG
	tscval = rdtsc() - tscval;
	tscval = tscval * 1000000 / tsc_frequency;	/* TSC ticks -> usec */
	if (SysCallsWorstCase[code] < tscval)
		SysCallsWorstCase[code] = tscval;
#endif

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	//kprintf("SYSMSG %d ", error);
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		frame->tf_rflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		if (frame->tf_err != 0 && frame->tf_err != 2)
			kprintf("lp %s:%d frame->tf_err is weird %ld\n",
				td->td_comm, lp->lwp_proc->p_pid, frame->tf_err);
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() should now be MP aware
	 */
	if (orig_tf_rflags & PSL_T) {
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);

	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: %ld extra tokens held after trap! syscall %p",
		td->td_toks_stop - &td->td_toks_base,
		callp->sy_call));
}

void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
	frame->tf_rdx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * jumps directly to the user process.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Check for exit-race.  If one lwp exits the process concurrent with
	 * another lwp creating a new thread, the two operations may cross
	 * each other resulting in the newly-created lwp not receiving a
	 * KILL signal.
	 */
	if (p->p_flags & P_WEXIT) {
		lwpsignal(p, lp, SIGKILL);
	}

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	lp->lwp_flags |= LWP_PASSIVE_ACQ;
	userexit(lp);
	lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}

/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 *
 * (As in syscall2() above, a set carry flag (PSL_C) on return to
 * userland signals that %rax holds an errno; a clear carry flag
 * signals success.)
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_rax = error;
	if (error)
		frame->tf_rflags |= PSL_C;
	else
		frame->tf_rflags &= ~PSL_C;
}