/*
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * x86_64 Trap and System call handling
 */
#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/ktrace.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/thread.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine_base/isa/intr_machdep.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#ifdef SMP
#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}
#else
#define MAKEMPSAFE(have_mplock)
#endif
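
/*
 * MAKEMPSAFE() is used by the handlers below that still require the MP
 * lock: the first invocation in a given frame acquires the lock and
 * latches that fact in the caller's have_mplock variable, and the exit
 * paths release the lock again only when have_mplock is set.
 */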

extern void trap(struct trapframe *frame);
static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);

#define MAX_TRAP_MSG		30
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
};

static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int ddb_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW,
	&ddb_on_seg_fault, 0, "Go to DDB on user seg-fault");
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		ncred = crhold(curp->p_ucred);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}
}
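
/*
 * Note the design: installing lwkt_passive_release() as td_release defers
 * the priority adjustment until a thread switch actually occurs, so a
 * syscall or trap that never blocks pays almost nothing for it, and
 * userexit() undoes the hook via lwkt_passive_recover() if it never fired.
 */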

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64-bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_flag & LWP_WEXIT) {
		get_mplock();
		lwp_exit(0);
		rel_mplock();	/* NOT REACHED */
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP) {
		get_mplock();
		tstop();
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flag & P_UPCALLPEND) {
		p->p_flag &= ~P_UPCALLPEND;
		get_mplock();
		postupcall(lp);
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		get_mplock();
		postsig(sig);
		rel_mplock();
		goto recheck;
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		get_mplock();
		p->p_flag |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;
		rel_mplock();
		goto recheck;
	}

	/*
	 * Make sure postsig() handled request to restore old signal mask after
	 * running signal handler.
	 */
	KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
}
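
/*
 * Each of the conditions handled above can create more work while we are
 * blocked (a new signal, another stop request), which is why the checks
 * re-run from the top of the loop until nothing is left pending.
 */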

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd;*/

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP) {
		get_mplock();
		tstop();
		rel_mplock();
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */
}
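
/*
 * acquire_curproc() is where a pending user reschedule is finally
 * resolved; it may block, and as the WARNING above notes the thread can
 * resume on a different cpu, which is why the cached globaldata pointer
 * is left commented out instead of being reused here.
 */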

#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "STR",
	 sizeof(long) + sizeof(long) + sizeof(long) + sizeof(vm_offset_t));
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "STR",
	 sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "STR",
	 sizeof(long) + sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "STR",
	 sizeof(long) + sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "STR",
	 sizeof(long) + sizeof(long));
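
/*
 * The KTR_INFO declarations above define the kernel-entry trace points
 * referenced by the KTR_LOG(kernentry_*) calls in trap(), syscall2() and
 * fork_return(); they only generate code when KTR_KERNENTRY is part of
 * the kernel's compiled-in KTR mask.
 */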

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
	int have_mplock = 0;
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
	vm_offset_t eva;

	kprintf0("\"%s\" type=%ld\n",
		trap_msg[frame->tf_trapno], frame->tf_trapno);
	kprintf0(" rip=%lx rsp=%lx\n", frame->tf_rip, frame->tf_rsp);
	kprintf0(" err=%lx addr=%lx\n", frame->tf_err, frame->tf_addr);
	kprintf0(" cs=%lx ss=%lx rflags=%lx\n", (unsigned long)frame->tf_cs,
		(unsigned long)frame->tf_ss, frame->tf_rflags);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, frame->tf_addr);
		--gd->gd_trap_nesting_level;

	eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
	++gd->gd_trap_nesting_level;
	MAKEMPSAFE(have_mplock);
	trap_fatal(frame, eva);
	--gd->gd_trap_nesting_level;

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			MAKEMPSAFE(have_mplock);
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_NMI && type != T_BPTFLT &&
		    type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;

		case T_ARITHTRAP:	/* arithmetic trap */

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
						RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
						p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */

		case T_SEGNPFLT:	/* segment not present fault */

		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
			ucode = code + BUS_SEGM_FAULT ;	/* XXX: ???*/

		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			i = trap_pfault(frame, TRUE);
			if (frame->tf_rip == 0)
				kprintf("T_PAGEFLT: Warning %%rip == 0!\n");

		case T_DIVIDE:		/* integer divide fault */

		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;

		case T_OFLOW:		/* integer overflow fault */

		case T_BOUND:		/* bounds check fault */

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna())
				goto out;
			ucode = FPE_FPU_NP_TRAP;

		case T_FPOPFLT:		/* FPU operand fetch fault */

		case T_XMMFLT:		/* SIMD floating-point exception */
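
		/*
		 * The cases from here on belong to the kernel-mode side of
		 * the trap dispatch (the else branch of the usermode test
		 * above); note that trap_pfault() is called with
		 * usermode == FALSE below.
		 */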
		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */

		case T_STKFLT:		/* stack fault */
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			kprintf("trap.c line %d\n", __LINE__);
			if (mycpu->gd_intr_nesting_level == 0) {
				if (td->td_pcb->pcb_onfault) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip = (long)doreti_iret_fault;
					goto out2;
				}
			}
			break;
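
			/*
			 * pcb_onfault is the recovery address installed by
			 * copyin()/copyout(); redirecting %rip there (or to
			 * doreti_iret_fault when the iretq itself faulted)
			 * turns an otherwise fatal kernel fault into an
			 * error return.
			 */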

			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
			if (frame->tf_rip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_rip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_rflags &= ~PSL_T;
				goto out2;
			}

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't do it for us.
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}

			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			MAKEMPSAFE(have_mplock);
			if (kdb_trap(type, 0, frame))
				goto out2;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
#endif /* NISA > 0 */

		default:
			MAKEMPSAFE(have_mplock);
			trap_fatal(frame, 0);
			goto out2;

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}

out:
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		KASSERT(td->td_mpcount == have_mplock,
			("badmpcount trap/end from %p", (void *)frame->tf_rip));
	}
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
	if (have_mplock)
		rel_mplock();
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %ld/%ld",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
}
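
/*
 * trap_pfault() resolves a page fault by running vm_fault() against either
 * the kernel map or the faulting lwp's vmspace and returns 0 when the
 * fault was handled, -1 when a fatal kernel fault was reported via
 * trap_fatal(), or the signal (SIGBUS/SIGSEGV) that should be posted.
 */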
static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	if (td->td_lwp->lwp_vkernel == NULL) {
		kprintf("seg-fault ft=%04x ff=%04x addr=%p rip=%p "
			"pid=%d p_comm=%s\n",
			ftype, fault_flags,
			(void *)frame->tf_addr,
			(void *)frame->tf_rip,
			p->p_pid, p->p_comm);
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
	}
	/* Debugger("seg-fault"); */

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
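
/*
 * trap_fatal() reports everything known about the machine state (fault
 * address and page-fault error bits, code segment descriptor, flags and
 * the current process/thread) and then either enters the kernel debugger
 * or panics.
 */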
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= 0x%lx\n", eva);
		kprintf("fault code		= %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%lx:0x%lx\n",
	       frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = (long)&frame->tf_rsp;
	}
	kprintf("stack pointer	        = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer	        = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment		= base 0x%lx, limit 0x%lx, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("			= DPL %d, pres %d, long %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
	       softseg.ssd_gran);
	kprintf("processor eflags	= ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu\n",
		    (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread          = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 */
void
dblfault_handler(struct trapframe *frame)
{
	kprintf0("DOUBLE FAULT\n");
	kprintf("\nFatal double fault\n");
	kprintf("rip = 0x%lx\n", frame->tf_rip);
	kprintf("rsp = 0x%lx\n", frame->tf_rsp);
	kprintf("rbp = 0x%lx\n", frame->tf_rbp);
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
	panic("double fault");
}
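
/*
 * No recovery is attempted after a double fault: it is raised while the
 * CPU was trying to push the frame for an earlier exception (typically a
 * stack overflow), so the state needed to resume has already been lost.
 */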

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int sticks;
	int error;
	int narg;
	int crit_count = td->td_critcount;
	int have_mplock = 0;
	u_int code;
	int regcnt;
	register_t *argp;
	union sysunion args;
	register_t *argsdst;

	mycpu->gd_cnt.v_syscall++;

	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_rax);

	KASSERT(td->td_mpcount == 0,
	        ("badmpcount syscall2 from %p", (void *)frame->tf_rip));

	userenter(td, p);	/* lazy raise our priority */

	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_rsp + sizeof(register_t);
	code = frame->tf_rax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		if (code == SYS_syscall || code == SYS___syscall) {
			code = frame->tf_rdi;
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;
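
	/*
	 * Out-of-range system call numbers were clamped to sv_table[0]
	 * above; by convention that slot is the nosys() entry, so a bogus
	 * number fails cleanly instead of indexing past the table.
	 */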

	/*
	 * On x86_64 we get up to six arguments in registers. The rest are
	 * on the stack. The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argp = &frame->tf_rdi;
	regcnt = 6;
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);

	/*
	 * JG can we overflow the space pointed to by 'argsdst'
	 * either with 'bcopy' or with 'copyin'?
	 */
	bcopy(argp, argsdst, sizeof(register_t) * regcnt);

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg > regcnt) {
		KASSERT(params != NULL, ("copyin args with no params!"));
		error = copyin(params, &argsdst[regcnt],
			(narg - regcnt) * sizeof(register_t));
		if (error) {
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(lp, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
			goto bad;
		}
	}

	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame. If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	//kprintf("SYSMSG %d ", error);
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		frame->tf_rflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
		break;

	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");

	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if (orig_tf_rflags & PSL_T) {
		MAKEMPSAFE(have_mplock);
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);

	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p", (void *)frame->tf_rip));
	if (have_mplock)
		rel_mplock();

	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);

	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %ld",
		td->td_toks_stop - &td->td_toks_base));
}

/*
 * NOTE: mplock not held at any point
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}

/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_rax = error;
	if (error)
		frame->tf_rflags |= PSL_C;
	else
		frame->tf_rflags &= ~PSL_C;
}
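
/*
 * Setting or clearing PSL_C here mirrors the normal syscall return path
 * above: the carry flag is what userland tests to decide whether %rax
 * holds an error number or a result.
 */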