/*
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * x86_64 Trap and System call handling
 */
#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/ktrace.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/thread.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine_base/isa/intr_machdep.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#ifdef SMP

#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}

#else

#define MAKEMPSAFE(have_mplock)

#endif
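
/*
 * Usage note: MAKEMPSAFE() lazily acquires the MP lock the first time a
 * non-MPSAFE path is hit while handling a trap or system call, and records
 * that fact in have_mplock so the exit path can release the lock again
 * (see the "badmpcount" assertions below).  In the non-SMP configuration
 * the macro expands to nothing.
 */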
extern void trap(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);
#define MAX_TRAP_MSG		30
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
};
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int ddb_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW,
	&ddb_on_seg_fault, 0, "Go to DDB on user seg-fault");
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");
/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		ncred = crhold(curp->p_ucred);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}
}
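
/*
 * Illustrative sketch (assumed scheduler behaviour, not taken from this
 * file): the td_release hook installed by userenter() is only invoked if
 * the LWKT scheduler actually switches away before the thread returns to
 * userland, roughly:
 *
 *	lwkt_switch:
 *		if (td->td_release)
 *			td->td_release(td);	 (drop to a user priority)
 *
 * If no switch occurs the hook is simply removed again by
 * lwkt_passive_recover() in userexit(), so the common fast path never
 * pays for a priority round trip.
 */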

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_flag & LWP_WEXIT) {
		get_mplock();
		lwp_exit(0);
		rel_mplock();	/* NOT REACHED */
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP || dump_stop_usertds) {
		get_mplock();
		tstop();
		rel_mplock();
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flag & P_UPCALLPEND) {
		p->p_flag &= ~P_UPCALLPEND;
		get_mplock();
		postupcall(lp);
		rel_mplock();
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		get_mplock();
		postsig(sig);
		rel_mplock();
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		get_mplock();
		p->p_flag |= P_SWAPWAIT;
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;
		rel_mplock();
	}

	/*
	 * Make sure postsig() handled request to restore old signal mask
	 * after running signal handler.
	 */
	KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
}
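
/*
 * Worked example for the truncation note above (illustrative values):
 * if td_sticks was 0x100000100 when the kernel was entered and is
 * 0x100000105 here, the saved 'sticks' is the truncated value 0x100 and
 * the profiling charge is (int)0x105 - 0x100 = 5 ticks.  Truncating to
 * int is safe because only the delta across one kernel entry is used.
 */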

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP) {
		get_mplock();
		tstop();
		rel_mplock();
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */
}

#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "STR",
	sizeof(long) + sizeof(long) + sizeof(long) + sizeof(vm_offset_t));
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "STR",
	sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "STR",
	sizeof(long) + sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "STR",
	sizeof(long) + sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "STR",
	sizeof(long) + sizeof(long));

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
	int have_mplock = 0;
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
	vm_offset_t eva;

	kprintf0("\"%s\" type=%ld\n",
		trap_msg[frame->tf_trapno], frame->tf_trapno);
	kprintf0(" rip=%lx rsp=%lx\n", frame->tf_rip, frame->tf_rsp);
	kprintf0(" err=%lx addr=%lx\n", frame->tf_err, frame->tf_addr);
	kprintf0(" cs=%lx ss=%lx rflags=%lx\n", (unsigned long)frame->tf_cs,
		(unsigned long)frame->tf_ss, frame->tf_rflags);

	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
	}

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			MAKEMPSAFE(have_mplock);
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_NMI && type != T_BPTFLT &&
			   type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;

		case T_ARITHTRAP:	/* arithmetic trap */

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
					RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
			ucode = code + BUS_SEGM_FAULT; /* XXX: ???*/

		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			i = trap_pfault(frame, TRUE);
			if (frame->tf_rip == 0)
				kprintf("T_PAGEFLT: Warning %%rip == 0!\n");

		case T_DIVIDE:		/* integer divide fault */

		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				kprintf("NMI ... going to debugger\n");
				kdb_trap(type, 0, frame);
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");

		case T_OFLOW:		/* integer overflow fault */

		case T_BOUND:		/* bounds check fault */

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */

			ucode = FPE_FPU_NP_TRAP;

		case T_FPOPFLT:		/* FPU operand fetch fault */

		case T_XMMFLT:		/* SIMD floating-point exception */
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			break;

		case T_STKFLT:		/* stack fault */
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (mycpu->gd_intr_nesting_level == 0) {
				if (td->td_pcb->pcb_onfault) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip = (long)doreti_iret_fault;
					goto out2;
				}
			}

			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
			if (frame->tf_rip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_rip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_rflags &= ~PSL_T;
				goto out2;
			}

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't.
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			MAKEMPSAFE(have_mplock);
			if (kdb_trap(type, 0, frame))
				goto out2;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				kprintf("NMI ... going to debugger\n");
				kdb_trap(type, 0, frame);
			} else if (panic_on_nmi == 0)
				goto out2;
#endif /* NISA > 0 */
		}

		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, 0);
		goto out2;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
	}

out:
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		KASSERT(td->td_mpcount == have_mplock,
			("badmpcount trap/end from %p", (void *)frame->tf_rip));
		userret(lp, frame, sticks);
		userexit(lp);
	}
out2:
	if (have_mplock)
		rel_mplock();
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %ld/%ld",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
}

static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = lp->lwp_vmspace;
		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
	else
		ftype = VM_PROT_READ;
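
	/*
	 * Reference note (standard x86 page-fault error code layout, not
	 * specific to this file): PGEX_P = 0x01 protection violation vs.
	 * page not present, PGEX_W = 0x02 write access, PGEX_U = 0x04 fault
	 * taken in user mode, PGEX_I = 0x10 instruction fetch (meaningful
	 * only when NX/pg_nx is enabled).  The selection above maps these
	 * bits onto VM_PROT_* for vm_fault().
	 */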

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);

	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	if (td->td_lwp->lwp_vkernel == NULL) {
		kprintf("seg-fault ft=%04x ff=%04x addr=%p rip=%p "
			"pid=%d p_comm=%s\n",
			ftype, fault_flags,
			(void *)frame->tf_addr,
			(void *)frame->tf_rip,
			p->p_pid, p->p_comm);
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
	}
	/* Debugger("seg-fault"); */

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, type, ss;
	long rsp;
	char *msg;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);

	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= 0x%lx\n", eva);
		kprintf("fault code		= %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%lx:0x%lx\n",
		frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = (long)&frame->tf_rsp;
	}
	kprintf("stack pointer		= 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer		= 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment		= base 0x%lx, limit 0x%lx, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("			= DPL %d, pres %d, long %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
		softseg.ssd_gran);
	kprintf("processor eflags	= ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	kprintf("%lu\n",
	    (u_long)curproc->p_pid);
	kprintf("current thread		= pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 */
static __inline int
in_kstack_guard(register_t rptr)
{
	thread_t td = curthread;

	if ((char *)rptr >= td->td_kstack &&
	    (char *)rptr < td->td_kstack + PAGE_SIZE) {
		return 1;
	}
	return 0;
}

void
dblfault_handler(struct trapframe *frame)
{
	thread_t td = curthread;

	if (in_kstack_guard(frame->tf_rsp) || in_kstack_guard(frame->tf_rbp)) {
		kprintf0("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
		if (in_kstack_guard(frame->tf_rsp))
			frame->tf_rsp = (register_t)(td->td_kstack + PAGE_SIZE);
		if (in_kstack_guard(frame->tf_rbp))
			frame->tf_rbp = (register_t)(td->td_kstack + PAGE_SIZE);
	} else {
		kprintf0("DOUBLE FAULT\n");
	}
	kprintf("\nFatal double fault\n");
	kprintf("rip = 0x%lx\n", frame->tf_rip);
	kprintf("rsp = 0x%lx\n", frame->tf_rsp);
	kprintf("rbp = 0x%lx\n", frame->tf_rbp);
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
	panic("double fault");
}
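
/*
 * Descriptive note: in_kstack_guard() above treats the first PAGE_SIZE
 * bytes of td_kstack as the guard region, so a %rsp or %rbp that has run
 * off the bottom of the kernel stack lands in that range.  When
 * dblfault_handler() detects this it resets the pointer to just above the
 * guard page so the diagnostic kprintf()s and panic() have a usable stack.
 */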

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int sticks;
	int error;
	int narg;
	int crit_count = td->td_critcount;
	int have_mplock = 0;
	u_int code;
	int regcnt;
	union sysunion args;
	register_t *argp;
	register_t *argsdst;

	mycpu->gd_cnt.v_syscall++;

	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_rax);

	KASSERT(td->td_mpcount == 0,
		("badmpcount syscall2 from %p", (void *)frame->tf_rip));

	userenter(td, p);	/* lazy raise our priority */

	sticks = (int)td->td_sticks;

	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_rsp + sizeof(register_t);
	code = frame->tf_rax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		if (code == SYS_syscall || code == SYS___syscall) {
			code = frame->tf_rdi;
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * On x86_64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argp = &frame->tf_rdi;
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);
	/*
	 * JG can we overflow the space pointed to by 'argsdst'
	 * either with 'bcopy' or with 'copyin'?
	 */
	bcopy(argp, argsdst, sizeof(register_t) * regcnt);

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg > regcnt) {
		KASSERT(params != NULL, ("copyin args with no params!"));
		error = copyin(params, &argsdst[regcnt],
			(narg - regcnt) * sizeof(register_t));
	}
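
	/*
	 * Illustrative note (the trapframe layout and entry-stub behaviour
	 * are assumed here, not shown in this excerpt): with the standard
	 * x86_64 syscall convention the six register arguments arrive in
	 * %rdi, %rsi, %rdx, %r10, %r8, %r9 (%r10 stands in for %rcx, which
	 * the syscall instruction clobbers).  The entry code lays them down
	 * as the first six register_t members of the trapframe starting at
	 * tf_rdi, which is why the single bcopy() above captures every
	 * register argument and copyin() only has to fetch arguments
	 * 7..narg from the user stack.
	 */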

	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg,
			(void *)(&args.nosys.sysmsg + 1));
	}

	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	//kprintf("SYSMSG %d ", error);
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		frame->tf_rflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
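		/*
		 * Assumption behind the arithmetic above (not visible in
		 * this excerpt): the syscall entry stub stashes the length
		 * of the syscall instruction (2 bytes) in tf_err, so
		 * subtracting it from tf_rip re-points the saved pc at the
		 * syscall instruction itself and the call is re-executed on
		 * return to userland.
		 */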
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if (orig_tf_rflags & PSL_T) {
		MAKEMPSAFE(have_mplock);
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);

	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p", (void *)frame->tf_rip));
	if (have_mplock)
		rel_mplock();

	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);

	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %ld",
		td->td_toks_stop - &td->td_toks_base));
}

/*
 * NOTE: mplock not held at any point
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
	frame->tf_rdx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}

/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_rax = error;
	if (error)
		frame->tf_rflags |= PSL_C;
	else
		frame->tf_rflags &= ~PSL_C;
}