/*
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/pc64/amd64/trap.c,v 1.3 2008/09/09 04:06:18 dillon Exp $
 */

/*
 * AMD64 Trap and System call handling
 */
#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/ktrace.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/thread.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#ifdef SMP
#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}
#else
#define MAKEMPSAFE(have_mplock)
#endif
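/*
 * Usage sketch (illustrative, not part of the original file): handlers
 * declare a local flag and funnel every Big Giant Lock acquisition
 * through the macro, e.g.
 *
 *	int have_mplock = 0;
 *	...
 *	MAKEMPSAFE(have_mplock);	(first use takes the BGL)
 *	MAKEMPSAFE(have_mplock);	(later uses are no-ops)
 *	if (have_mplock)
 *		rel_mplock();
 *
 * so a trap or syscall path only pays for the global lock on the
 * branches that actually need it.
 */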
extern void trap(struct trapframe *frame);
extern void syscall2(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);
#define PCPU_GET(member) ((mycpu)->gd_##member)
#define PCPU_INC(member) ((mycpu)->gd_##member)++
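/*
 * These compatibility macros map FreeBSD-style per-CPU accessors onto
 * DragonFly's per-CPU globaldata: PCPU_GET(cpuid) expands to
 * (mycpu)->gd_cpuid and PCPU_INC(cnt.v_syscall) increments
 * (mycpu)->gd_cnt.v_syscall, which is how trap_fatal() and syscall2()
 * below use them.
 */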
#define MAX_TRAP_MSG		30
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
};
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");
static int syscall_mpsafe = 1;
SYSCTL_INT(_kern, OID_AUTO, syscall_mpsafe, CTLFLAG_RW,
	&syscall_mpsafe, 0, "Allow MPSAFE marked syscalls to run without BGL");
TUNABLE_INT("kern.syscall_mpsafe", &syscall_mpsafe);
static int trap_mpsafe = 1;
SYSCTL_INT(_kern, OID_AUTO, trap_mpsafe, CTLFLAG_RW,
	&trap_mpsafe, 0, "Allow traps to mostly run without the BGL");
TUNABLE_INT("kern.trap_mpsafe", &trap_mpsafe);
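/*
 * Illustrative usage (not from the original source): via TUNABLE_INT
 * both knobs can be seeded from the loader environment at boot, e.g. in
 * /boot/loader.conf:
 *
 *	kern.syscall_mpsafe="0"
 *	kern.trap_mpsafe="0"
 *
 * and, because they are CTLFLAG_RW, flipped at runtime with sysctl(8),
 * e.g. `sysctl kern.trap_mpsafe=1'.
 */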
/*
 * Passive USER->KERNEL transition.  This only occurs if we block in the
 * kernel while still holding our userland priority.  We have to fixup our
 * priority in order to avoid potential deadlocks before we allow the system
 * to switch us to another thread.
 */
static void
passive_release(struct thread *td)
{
	struct lwp *lp = td->td_lwp;

	td->td_release = NULL;
	lwkt_setpri_self(TDPRI_KERN_USER);
	lp->lwp_proc->p_usched->release_curproc(lp);
}
/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */
static __inline void
userenter(struct thread *curtd)
{
	curtd->td_release = passive_release;
}
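/*
 * Editorial summary (not from the original source): the hook is
 * one-shot.  If the thread never blocks, userexit() below finds
 * td_release still installed, so the priority was never raised and
 * needs no fixup; if the thread does block, the LWKT switcher invokes
 * passive_release() first, which raises the priority to TDPRI_KERN_USER
 * and gives up the current-process designation before another thread
 * runs.  The fast_release/slow_release sysctls above exist to count how
 * often each case occurs.
 */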
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
 * bit arithmetic on the delta calculation so the absolute tick values are
 * truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_flag & LWP_WEXIT) {
		get_mplock();
		lwp_exit(0);
		rel_mplock();	/* NOT REACHED */
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP) {
		get_mplock();
		tstop();
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flag & P_UPCALLPEND) {
		p->p_flag &= ~P_UPCALLPEND;
		get_mplock();
		postupcall(lp);
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 */
	if ((sig = CURSIG(lp)) != 0) {
		get_mplock();
		postsig(sig);
		rel_mplock();
		goto recheck;
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		get_mplock();
		p->p_flag |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;
		rel_mplock();
		goto recheck;
	}

	/*
	 * Make sure postsig() handled request to restore old signal mask after
	 * running signal handler.
	 */
	KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
}
/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	globaldata_t gd = td->td_gd;

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP) {
		get_mplock();
		tstop();
		rel_mplock();
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */
}
#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "STR",
	 sizeof(long) + sizeof(long) + sizeof(long) + sizeof(vm_offset_t));
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "STR",
	 sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "STR",
	 sizeof(long) + sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "STR",
	 sizeof(long) + sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "STR",
	 sizeof(long) + sizeof(long));
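/*
 * These KTR_INFO declarations register trace points for kernel-entry
 * events (trap, syscall, fork return, and their corresponding returns).
 * When KTR support is compiled in, the KTR_LOG(kernentry_...) calls
 * sprinkled through this file record pid/tid plus trap or syscall
 * details into the kernel trace buffer for later inspection.
 */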
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicing
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_pri & ~TDPRI_MASK;
#endif
	vm_offset_t eva;

	p = td->td_proc;
	kprintf0("\"%s\" type=%ld\n",
		 trap_msg[frame->tf_trapno], frame->tf_trapno);
	kprintf0(" rip=%lx rsp=%lx\n", frame->tf_rip, frame->tf_rsp);
	kprintf0(" err=%lx addr=%lx\n", frame->tf_err, frame->tf_addr);
	kprintf0(" cs=%lx ss=%lx rflags=%lx\n", (unsigned long)frame->tf_cs,
		 (unsigned long)frame->tf_ss, frame->tf_rflags);
	++gd->gd_trap_nesting_level;
	MAKEMPSAFE(have_mplock);
	trap_fatal(frame, frame->tf_addr);
	--gd->gd_trap_nesting_level;
	eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
	++gd->gd_trap_nesting_level;
	MAKEMPSAFE(have_mplock);
	trap_fatal(frame, eva);
	--gd->gd_trap_nesting_level;
	if (trap_mpsafe == 0) {
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		--gd->gd_trap_nesting_level;
	}
	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			MAKEMPSAFE(have_mplock);
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_NMI && type != T_BPTFLT &&
		    type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td);

		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = frame;
		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
					    RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			i = trap_pfault(frame, TRUE);
			//kprintf("TRAP_PFAULT %d\n", i);
			if (frame->tf_rip == 0)
				kprintf0("T_PAGEFLT: rip == 0!\n");
			if (i == -1)
				goto out;
			if (i == 0)
				goto out;

			ucode = T_PAGEFLT;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV_TRAP;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF_TRAP;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_SUBRNG_TRAP;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here
			 */
			if (npxdna())
				goto out;
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = T_FPOPFLT;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0;
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out2;
			break;

		case T_STKFLT:		/* stack fault */
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			kprintf0("trap.c line %d\n", __LINE__);
			if (mycpu->gd_intr_nesting_level == 0) {
				if (td->td_pcb->pcb_onfault) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip = (long)doreti_iret_fault;
					goto out2;
				}
			}
			break;
		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out2;
			}
			break;
		case T_TRCTRAP:	 /* trace trap */
			if (frame->tf_rip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_rip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_rflags &= ~PSL_T;
				goto out2;
			}

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;

		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
#if NISA > 0
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* NISA > 0 */
		}

		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, 0);
		goto out2;
	}
	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}
	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}
	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif
out:
#ifdef SMP
	if (ISPL(frame->tf_cs) == SEL_UPL)
		KASSERT(td->td_mpcount == have_mplock,
			("badmpcount trap/end from %p", (void *)frame->tf_rip));
#endif
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		 crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
#endif
}
static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}
	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
	else
		ftype = VM_PROT_READ;
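	/*
	 * For reference, the x86 page-fault error code bits tested here
	 * and decoded again in trap_fatal():
	 *
	 *	PGEX_P - clear if the fault hit a not-present page
	 *	PGEX_W - the fault was caused by a write
	 *	PGEX_U - the fault originated in user mode
	 *	PGEX_I - the fault was an instruction fetch (only reported
	 *		 with NX enabled, hence the pg_nx test above)
	 */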
	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack(lp->lwp_proc, va)) {
			rv = KERN_FAILURE;
			PRELE(lp->lwp_proc);
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on amd64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
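/*
 * Reference note (editorial, not from the original source): vm_fault()
 * returns a KERN_* status.  KERN_SUCCESS was already mapped to 0 above;
 * a protection failure becomes SIGBUS and everything else (bad address,
 * resource shortage) becomes SIGSEGV, which trap() then delivers to the
 * process via trapsignal().
 */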
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, type, ss;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* two separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", PCPU_GET(cpuid));
	kprintf("apic id = %02x\n", PCPU_GET(apic_id));
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= 0x%lx\n", eva);
		kprintf("fault code		= %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%lx:0x%lx\n",
	       frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = (long)&frame->tf_rsp;
	}
	kprintf("stack pointer		= 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer		= 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment		= base 0x%lx, limit 0x%lx, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("			= DPL %d, pres %d, long %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
	       softseg.ssd_gran);
	kprintf("processor eflags	= ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu\n",
		    (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread		= pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 */
void
dblfault_handler(struct trapframe *frame)
{
	kprintf0("DOUBLE FAULT\n");
	kprintf("\nFatal double fault\n");
	kprintf("rip = 0x%lx\n", frame->tf_rip);
	kprintf("rsp = 0x%lx\n", frame->tf_rsp);
	kprintf("rbp = 0x%lx\n", frame->tf_rbp);

	/* two separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", PCPU_GET(cpuid));
	kprintf("apic id = %02x\n", PCPU_GET(apic_id));
	panic("double fault");
}
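/*
 * Background note (editorial, not from the original source): a double
 * fault is not recoverable; by the time this handler runs the CPU has
 * already failed to push a frame for the original exception.  On amd64
 * the handler executes on its own known-good stack via the TSS/IST
 * mechanism, so the only safe actions are printing state and panicking,
 * which is exactly what the function above does.
 */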
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 *
 * MPSAFE - note that large sections of this routine are run without
 * the MP lock.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_pri & ~TDPRI_MASK;
#endif
#ifdef SMP
	int have_mplock = 0;
#endif
	u_int code;
	int regcnt = 6;		/* up to six args are passed in registers */
	register_t *argp;
	union sysunion args;
	register_t *argsdst;

	kprintf0("SYSCALL rip = %016lx\n", frame->tf_rip);
	PCPU_INC(cnt.v_syscall);

	kprintf0("\033[31mSYSCALL %ld\033[39m\n", frame->tf_rax);

	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_rax);

#ifdef SMP
	KASSERT(td->td_mpcount == 0,
		("badmpcount syscall2 from %p", (void *)frame->tf_rip));
	if (syscall_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
#endif
	userenter(td);	/* lazy raise our priority */
	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;
	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		error = vkernel_trap(lp, frame);
		frame->tf_rax = error;
		if (error)
			frame->tf_rflags |= PSL_C;
		error = EJUSTRETURN;
		goto out;
	}
	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_rsp + sizeof(register_t);
	code = frame->tf_rax;
	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		if (code == SYS_syscall || code == SYS___syscall) {
			code = frame->tf_rdi;
		}
	}
	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;
	/*
	 * On amd64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argp = &frame->tf_rdi;
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);
	/*
	 * JG can we overflow the space pointed to by 'argsdst'
	 * either with 'bcopy' or with 'copyin'?
	 */
	bcopy(argp, argsdst, sizeof(register_t) * regcnt);
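	/*
	 * For reference (editorial note): the six integer argument
	 * registers in the SysV AMD64 ABI are %rdi, %rsi, %rdx, %rcx,
	 * %r8 and %r9, and tf_rdi through tf_r9 lead struct trapframe in
	 * that order, which is what makes the single bcopy() above
	 * sufficient.  (Under the SYSCALL instruction the fourth argument
	 * arrives in %r10 because %rcx is clobbered; see the ERESTART
	 * handling below.)
	 */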
	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg > regcnt) {
		KASSERT(params != NULL, ("copyin args with no params!"));
		error = copyin(params, &argsdst[regcnt],
			       (narg - regcnt) * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(lp, code, narg,
					   (void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif
	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */
	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * the syscall is not MP safe.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		MAKEMPSAFE(have_mplock);

	error = (*callp->sy_call)(&args);
out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	//kprintf("SYSMSG %d ", error);
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		kprintf0("RESULT %ld %ld\n", frame->tf_rax, frame->tf_rdx);
		frame->tf_rflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
		td->td_pcb->pcb_flags |= PCB_FULLCTX;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		kprintf0("ERROR %d\n", error);
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}
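	/*
	 * Editorial note on the ERESTART case above: per the comment,
	 * tf_err holds the 2-byte length of the 'syscall' instruction, so
	 * tf_rip -= tf_err backs the PC up to re-execute it.  Because the
	 * SYSCALL convention clobbers %rcx (stubs pass the fourth argument
	 * in %r10 instead), %r10 is re-primed from the saved %rcx and
	 * PCB_FULLCTX requests a full register restore on the way out so
	 * the retried instruction sees its original register state.
	 */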
	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if (orig_tf_rflags & PSL_T) {
		MAKEMPSAFE(have_mplock);
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p", (void *)frame->tf_rip));
	if (have_mplock)
		rel_mplock();
#endif
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		 crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
#endif
}
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	kprintf0("fork return\n");
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
	frame->tf_rdx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	kprintf0("generic_lwp_return\n");

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
#ifdef SMP
	KKASSERT(lp->lwp_thread->td_mpcount == 1);
	rel_mplock();
#endif
}
/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)