2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (C) 1994, David Greenman
4 * Copyright (c) 1982, 1987, 1990, 1993
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
39 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
44 #include "opt_msgbuf.h"
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/sysproto.h>
50 #include <sys/signalvar.h>
51 #include <sys/kernel.h>
52 #include <sys/linker.h>
53 #include <sys/malloc.h>
56 #include <sys/reboot.h>
58 #include <sys/msgbuf.h>
59 #include <sys/sysent.h>
60 #include <sys/sysctl.h>
61 #include <sys/vmmeter.h>
63 #include <sys/usched.h>
67 #include <vm/vm_param.h>
69 #include <vm/vm_kern.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_page.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_pager.h>
74 #include <vm/vm_extern.h>
76 #include <sys/thread2.h>
77 #include <sys/mplock2.h>
85 #include <machine/cpu.h>
86 #include <machine/clock.h>
87 #include <machine/specialreg.h>
88 #include <machine/md_var.h>
89 #include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */
90 #include <machine/globaldata.h> /* CPU_prvspace */
91 #include <machine/smp.h>
92 #include <machine/cputypes.h>
94 #include <bus/isa/rtc.h>
95 #include <sys/random.h>
96 #include <sys/ptrace.h>
97 #include <machine/sigframe.h>
98 #include <unistd.h> /* umtx_* functions */
99 #include <pthread.h> /* pthread_yield() */
/* Double-fault trap handler; defined elsewhere (assembly entry point). */
extern void dblfault_handler (void);

/*
 * Converters between the legacy 87-format FPU save area and the
 * FXSAVE (XMM) save area; implementations are below in this file.
 */
static void set_fpregs_xmm (struct save87 *, struct savexmm *);
static void fill_fpregs_xmm (struct savexmm *, struct save87 *);

/* Per-cpu TSC offsets, indexed by cpu id (presumably used to align
 * timestamps across cpus -- maintained outside this chunk; verify). */
int64_t tsc_offsets[MAXCPU];
#if defined(SWTCH_OPTIM_STATS)
/*
 * Context-switch optimization statistics, exported read-only under
 * debug.*.  The counters themselves are maintained by the switch code.
 * NOTE: the original extraction dropped the closing #endif; restored here.
 */
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif
117 sysctl_hw_physmem(SYSCTL_HANDLER_ARGS
)
119 u_long pmem
= ctob(physmem
);
122 error
= sysctl_handle_long(oidp
, &pmem
, 0, req
);
127 SYSCTL_PROC(_hw
, HW_PHYSMEM
, physmem
, CTLTYPE_ULONG
|CTLFLAG_RD
,
128 0, 0, sysctl_hw_physmem
, "LU", "Total system memory in bytes (number of pages * page size)");
131 sysctl_hw_usermem(SYSCTL_HANDLER_ARGS
)
133 u_long usermem
= ctob(Maxmem
- vmstats
.v_wire_count
);
136 error
= sysctl_handle_long(oidp
, &usermem
, 0, req
);
141 SYSCTL_PROC(_hw
, HW_USERMEM
, usermem
, CTLTYPE_ULONG
|CTLFLAG_RD
,
142 0, 0, sysctl_hw_usermem
, "LU", "");
/* Read-only sysctl hw.availpages exposing Maxmem (usable page count). */
SYSCTL_ULONG(_hw, OID_AUTO, availpages, CTLFLAG_RD, &Maxmem, 0, "");
147 * Send an interrupt to process.
149 * Stack is set up to allow sigcode stored
150 * at top to call routine, followed by kcall
151 * to sigreturn routine below. After sigreturn
152 * resets the signal mask, the stack, and the
153 * frame pointer, it returns to the user
157 sendsig(sig_t catcher
, int sig
, sigset_t
*mask
, u_long code
)
159 struct lwp
*lp
= curthread
->td_lwp
;
160 struct proc
*p
= lp
->lwp_proc
;
161 struct trapframe
*regs
;
162 struct sigacts
*psp
= p
->p_sigacts
;
163 struct sigframe sf
, *sfp
;
167 regs
= lp
->lwp_md
.md_regs
;
168 oonstack
= (lp
->lwp_sigstk
.ss_flags
& SS_ONSTACK
) ? 1 : 0;
170 /* Save user context */
171 bzero(&sf
, sizeof(struct sigframe
));
172 sf
.sf_uc
.uc_sigmask
= *mask
;
173 sf
.sf_uc
.uc_stack
= lp
->lwp_sigstk
;
174 sf
.sf_uc
.uc_mcontext
.mc_onstack
= oonstack
;
175 KKASSERT(__offsetof(struct trapframe
, tf_rdi
) == 0);
176 bcopy(regs
, &sf
.sf_uc
.uc_mcontext
.mc_rdi
, sizeof(struct trapframe
));
178 /* Make the size of the saved context visible to userland */
179 sf
.sf_uc
.uc_mcontext
.mc_len
= sizeof(sf
.sf_uc
.uc_mcontext
);
181 /* Allocate and validate space for the signal handler context. */
182 if ((lp
->lwp_flags
& LWP_ALTSTACK
) != 0 && !oonstack
&&
183 SIGISMEMBER(psp
->ps_sigonstack
, sig
)) {
184 sp
= (char *)(lp
->lwp_sigstk
.ss_sp
+ lp
->lwp_sigstk
.ss_size
-
185 sizeof(struct sigframe
));
186 lp
->lwp_sigstk
.ss_flags
|= SS_ONSTACK
;
188 /* We take red zone into account */
189 sp
= (char *)regs
->tf_rsp
- sizeof(struct sigframe
) - 128;
192 /* Align to 16 bytes */
193 sfp
= (struct sigframe
*)((intptr_t)sp
& ~0xFUL
);
195 /* Translate the signal is appropriate */
196 if (p
->p_sysent
->sv_sigtbl
) {
197 if (sig
<= p
->p_sysent
->sv_sigsize
)
198 sig
= p
->p_sysent
->sv_sigtbl
[_SIG_IDX(sig
)];
202 * Build the argument list for the signal handler.
204 * Arguments are in registers (%rdi, %rsi, %rdx, %rcx)
206 regs
->tf_rdi
= sig
; /* argument 1 */
207 regs
->tf_rdx
= (register_t
)&sfp
->sf_uc
; /* argument 3 */
209 if (SIGISMEMBER(psp
->ps_siginfo
, sig
)) {
211 * Signal handler installed with SA_SIGINFO.
213 * action(signo, siginfo, ucontext)
215 regs
->tf_rsi
= (register_t
)&sfp
->sf_si
; /* argument 2 */
216 regs
->tf_rcx
= (register_t
)regs
->tf_err
; /* argument 4 */
217 sf
.sf_ahu
.sf_action
= (__siginfohandler_t
*)catcher
;
219 /* fill siginfo structure */
220 sf
.sf_si
.si_signo
= sig
;
221 sf
.sf_si
.si_code
= code
;
222 sf
.sf_si
.si_addr
= (void *)regs
->tf_addr
;
225 * Old FreeBSD-style arguments.
227 * handler (signo, code, [uc], addr)
229 regs
->tf_rsi
= (register_t
)code
; /* argument 2 */
230 regs
->tf_rcx
= (register_t
)regs
->tf_addr
; /* argument 4 */
231 sf
.sf_ahu
.sf_handler
= catcher
;
236 * If we're a vm86 process, we want to save the segment registers.
237 * We also change eflags to be our emulated eflags, not the actual
240 if (regs
->tf_eflags
& PSL_VM
) {
241 struct trapframe_vm86
*tf
= (struct trapframe_vm86
*)regs
;
242 struct vm86_kernel
*vm86
= &lp
->lwp_thread
->td_pcb
->pcb_ext
->ext_vm86
;
244 sf
.sf_uc
.uc_mcontext
.mc_gs
= tf
->tf_vm86_gs
;
245 sf
.sf_uc
.uc_mcontext
.mc_fs
= tf
->tf_vm86_fs
;
246 sf
.sf_uc
.uc_mcontext
.mc_es
= tf
->tf_vm86_es
;
247 sf
.sf_uc
.uc_mcontext
.mc_ds
= tf
->tf_vm86_ds
;
249 if (vm86
->vm86_has_vme
== 0)
250 sf
.sf_uc
.uc_mcontext
.mc_eflags
=
251 (tf
->tf_eflags
& ~(PSL_VIF
| PSL_VIP
)) |
252 (vm86
->vm86_eflags
& (PSL_VIF
| PSL_VIP
));
255 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
256 * syscalls made by the signal handler. This just avoids
257 * wasting time for our lazy fixup of such faults. PSL_NT
258 * does nothing in vm86 mode, but vm86 programs can set it
259 * almost legitimately in probes for old cpu types.
261 tf
->tf_eflags
&= ~(PSL_VM
| PSL_NT
| PSL_VIF
| PSL_VIP
);
266 * Save the FPU state and reinit the FP unit
268 npxpush(&sf
.sf_uc
.uc_mcontext
);
271 * Copy the sigframe out to the user's stack.
273 if (copyout(&sf
, sfp
, sizeof(struct sigframe
)) != 0) {
275 * Something is wrong with the stack pointer.
276 * ...Kill the process.
281 regs
->tf_rsp
= (register_t
)sfp
;
282 regs
->tf_rip
= trunc_page64(PS_STRINGS
- *(p
->p_sysent
->sv_szsigcode
));
283 regs
->tf_rip
-= SZSIGCODE_EXTRA_BYTES
;
286 * x86 abi specifies that the direction flag must be cleared
289 regs
->tf_rflags
&= ~(PSL_T
|PSL_D
);
292 * 64 bit mode has a code and stack selector but
293 * no data or extra selector. %fs and %gs are not
296 regs
->tf_cs
= _ucodesel
;
297 regs
->tf_ss
= _udatasel
;
301 * Sanitize the trapframe for a virtual kernel passing control to a custom
302 * VM context. Remove any items that would otherwise create a privilage
305 * XXX at the moment we allow userland to set the resume flag. Is this a
309 cpu_sanitize_frame(struct trapframe
*frame
)
311 frame
->tf_cs
= _ucodesel
;
312 frame
->tf_ss
= _udatasel
;
313 /* XXX VM (8086) mode not supported? */
314 frame
->tf_rflags
&= (PSL_RF
| PSL_USERCHANGE
| PSL_VM_UNSUPP
);
315 frame
->tf_rflags
|= PSL_RESERVED_DEFAULT
| PSL_I
;
/*
 * Sanitize the tls so loading the descriptor does not blow up
 * on us.  For x86_64 we don't have to do anything.
 */
int
cpu_sanitize_tls(struct savetls *tls)
{
	return (0);
}
331 * sigreturn(ucontext_t *sigcntxp)
333 * System call to cleanup state after a signal
334 * has been taken. Reset signal mask and
335 * stack state from context left by sendsig (above).
336 * Return to previous pc and psl as specified by
337 * context left by sendsig. Check carefully to
338 * make sure that the user has not modified the
339 * state to gain improper privileges.
341 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
342 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
345 sys_sigreturn(struct sigreturn_args
*uap
)
347 struct lwp
*lp
= curthread
->td_lwp
;
348 struct trapframe
*regs
;
356 * We have to copy the information into kernel space so userland
357 * can't modify it while we are sniffing it.
359 regs
= lp
->lwp_md
.md_regs
;
360 error
= copyin(uap
->sigcntxp
, &uc
, sizeof(uc
));
364 rflags
= ucp
->uc_mcontext
.mc_rflags
;
366 /* VM (8086) mode not supported */
367 rflags
&= ~PSL_VM_UNSUPP
;
370 if (eflags
& PSL_VM
) {
371 struct trapframe_vm86
*tf
= (struct trapframe_vm86
*)regs
;
372 struct vm86_kernel
*vm86
;
375 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
376 * set up the vm86 area, and we can't enter vm86 mode.
378 if (lp
->lwp_thread
->td_pcb
->pcb_ext
== 0)
380 vm86
= &lp
->lwp_thread
->td_pcb
->pcb_ext
->ext_vm86
;
381 if (vm86
->vm86_inited
== 0)
384 /* go back to user mode if both flags are set */
385 if ((eflags
& PSL_VIP
) && (eflags
& PSL_VIF
))
386 trapsignal(lp
->lwp_proc
, SIGBUS
, 0);
388 if (vm86
->vm86_has_vme
) {
389 eflags
= (tf
->tf_eflags
& ~VME_USERCHANGE
) |
390 (eflags
& VME_USERCHANGE
) | PSL_VM
;
392 vm86
->vm86_eflags
= eflags
; /* save VIF, VIP */
393 eflags
= (tf
->tf_eflags
& ~VM_USERCHANGE
) | (eflags
& VM_USERCHANGE
) | PSL_VM
;
395 bcopy(&ucp
.uc_mcontext
.mc_gs
, tf
, sizeof(struct trapframe
));
396 tf
->tf_eflags
= eflags
;
397 tf
->tf_vm86_ds
= tf
->tf_ds
;
398 tf
->tf_vm86_es
= tf
->tf_es
;
399 tf
->tf_vm86_fs
= tf
->tf_fs
;
400 tf
->tf_vm86_gs
= tf
->tf_gs
;
401 tf
->tf_ds
= _udatasel
;
402 tf
->tf_es
= _udatasel
;
404 tf
->tf_fs
= _udatasel
;
405 tf
->tf_gs
= _udatasel
;
411 * Don't allow users to change privileged or reserved flags.
414 * XXX do allow users to change the privileged flag PSL_RF.
415 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
416 * should sometimes set it there too. tf_eflags is kept in
417 * the signal context during signal handling and there is no
418 * other place to remember it, so the PSL_RF bit may be
419 * corrupted by the signal handler without us knowing.
420 * Corruption of the PSL_RF bit at worst causes one more or
421 * one less debugger trap, so allowing it is fairly harmless.
423 if (!EFL_SECURE(rflags
& ~PSL_RF
, regs
->tf_rflags
& ~PSL_RF
)) {
424 kprintf("sigreturn: rflags = 0x%lx\n", (long)rflags
);
429 * Don't allow users to load a valid privileged %cs. Let the
430 * hardware check for invalid selectors, excess privilege in
431 * other selectors, invalid %eip's and invalid %esp's.
433 cs
= ucp
->uc_mcontext
.mc_cs
;
434 if (!CS_SECURE(cs
)) {
435 kprintf("sigreturn: cs = 0x%x\n", cs
);
436 trapsignal(lp
, SIGBUS
, T_PROTFLT
);
439 bcopy(&ucp
->uc_mcontext
.mc_rdi
, regs
, sizeof(struct trapframe
));
443 * Restore the FPU state from the frame
445 npxpop(&ucp
->uc_mcontext
);
447 if (ucp
->uc_mcontext
.mc_onstack
& 1)
448 lp
->lwp_sigstk
.ss_flags
|= SS_ONSTACK
;
450 lp
->lwp_sigstk
.ss_flags
&= ~SS_ONSTACK
;
452 lp
->lwp_sigmask
= ucp
->uc_sigmask
;
453 SIG_CANTMASK(lp
->lwp_sigmask
);
458 * cpu_idle() represents the idle LWKT. You cannot return from this function
459 * (unless you want to blow things up!). Instead we look for runnable threads
460 * and loop or halt as appropriate. Giant is not held on entry to the thread.
462 * The main loop is entered with a critical section held, we must release
463 * the critical section before doing anything else. lwkt_switch() will
464 * check for pending interrupts due to entering and exiting its own
467 * Note on cpu_idle_hlt: On an SMP system we rely on a scheduler IPI
468 * to wake a HLTed cpu up.
/*
 * Idle-loop tuning and statistics, exported under machdep.*:
 *   cpu_idle_hlt     - non-zero enables halting in the idle loop (default 1)
 *   cpu_idle_hltcnt  - number of idle-loop entries that halted
 *   cpu_idle_spincnt - number of idle-loop entries that spun
 */
static int cpu_idle_hlt = 1;
static int cpu_idle_hltcnt;
static int cpu_idle_spincnt;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
	&cpu_idle_hlt, 0, "Idle loop HLT enable");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hltcnt, CTLFLAG_RW,
	&cpu_idle_hltcnt, 0, "Idle loop entry halts");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_spincnt, CTLFLAG_RW,
	&cpu_idle_spincnt, 0, "Idle loop entry spins");
483 struct thread
*td
= curthread
;
484 struct mdglobaldata
*gd
= mdcpu
;
488 KKASSERT(td
->td_critcount
== 0);
493 * See if there are any LWKTs ready to go.
498 * The idle loop halts only if no threads are scheduleable
499 * and no signals have occured.
502 (td
->td_gd
->gd_reqflags
& RQF_IDLECHECK_WK_MASK
) == 0) {
504 if ((td
->td_gd
->gd_reqflags
& RQF_IDLECHECK_WK_MASK
) == 0) {
506 struct timeval tv1
, tv2
;
507 gettimeofday(&tv1
, NULL
);
509 reqflags
= gd
->mi
.gd_reqflags
&
510 ~RQF_IDLECHECK_WK_MASK
;
511 KKASSERT(gd
->mi
.gd_processing_ipiq
== 0);
512 umtx_sleep(&gd
->mi
.gd_reqflags
, reqflags
,
515 gettimeofday(&tv2
, NULL
);
516 if (tv2
.tv_usec
- tv1
.tv_usec
+
517 (tv2
.tv_sec
- tv1
.tv_sec
) * 1000000
519 kprintf("cpu %d idlelock %08x %08x\n",
529 __asm
__volatile("pause");
536 * Called by the spinlock code with or without a critical section held
537 * when a spinlock is found to be seriously constested.
539 * We need to enter a critical section to prevent signals from recursing
543 cpu_spinlock_contested(void)
549 * Clear registers on exec
552 exec_setregs(u_long entry
, u_long stack
, u_long ps_strings
)
554 struct thread
*td
= curthread
;
555 struct lwp
*lp
= td
->td_lwp
;
556 struct pcb
*pcb
= td
->td_pcb
;
557 struct trapframe
*regs
= lp
->lwp_md
.md_regs
;
561 bzero((char *)regs
, sizeof(struct trapframe
));
562 regs
->tf_rip
= entry
;
563 regs
->tf_rsp
= ((stack
- 8) & ~0xFul
) + 8; /* align the stack */
564 regs
->tf_rdi
= stack
; /* argv */
565 regs
->tf_rflags
= PSL_USER
| (regs
->tf_rflags
& PSL_T
);
566 regs
->tf_ss
= _udatasel
;
567 regs
->tf_cs
= _ucodesel
;
568 regs
->tf_rbx
= ps_strings
;
571 * Reset the hardware debug registers if they were in use.
572 * They won't have any meaning for the newly exec'd process.
574 if (pcb
->pcb_flags
& PCB_DBREGS
) {
580 pcb
->pcb_dr7
= 0; /* JG set bit 10? */
581 if (pcb
== td
->td_pcb
) {
583 * Clear the debug registers on the running
584 * CPU, otherwise they will end up affecting
585 * the next process we switch to.
589 pcb
->pcb_flags
&= ~PCB_DBREGS
;
593 * Initialize the math emulator (if any) for the current process.
594 * Actually, just clear the bit that says that the emulator has
595 * been initialized. Initialization is delayed until the process
596 * traps to the emulator (if it is done at all) mainly because
597 * emulators don't provide an entry point for initialization.
599 pcb
->pcb_flags
&= ~FP_SOFTFP
;
602 * NOTE: do not set CR0_TS here. npxinit() must do it after clearing
603 * gd_npxthread. Otherwise a preemptive interrupt thread
604 * may panic in npxdna().
608 load_cr0(rcr0() | CR0_MP
);
612 * NOTE: The MSR values must be correct so we can return to
613 * userland. gd_user_fs/gs must be correct so the switch
614 * code knows what the current MSR values are.
616 pcb
->pcb_fsbase
= 0; /* Values loaded from PCB on switch */
618 /* Initialize the npx (if any) for the current process. */
623 * note: linux emulator needs edx to be 0x0 on entry, which is
624 * handled in execve simply by setting the 64 bit syscall
636 cr0
|= CR0_NE
; /* Done by npxinit() */
637 cr0
|= CR0_MP
| CR0_TS
; /* Done at every execve() too. */
638 cr0
|= CR0_WP
| CR0_AM
;
645 sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS
)
648 error
= sysctl_handle_int(oidp
, oidp
->oid_arg1
, oidp
->oid_arg2
,
650 if (!error
&& req
->newptr
)
655 SYSCTL_PROC(_machdep
, CPU_ADJKERNTZ
, adjkerntz
, CTLTYPE_INT
|CTLFLAG_RW
,
656 &adjkerntz
, 0, sysctl_machdep_adjkerntz
, "I", "");
659 * Initialize 386 and configure to run kernel
663 * Initialize segments & interrupt table
666 extern struct user
*proc0paddr
;
671 IDTVEC(div
), IDTVEC(dbg
), IDTVEC(nmi
), IDTVEC(bpt
), IDTVEC(ofl
),
672 IDTVEC(bnd
), IDTVEC(ill
), IDTVEC(dna
), IDTVEC(fpusegm
),
673 IDTVEC(tss
), IDTVEC(missing
), IDTVEC(stk
), IDTVEC(prot
),
674 IDTVEC(page
), IDTVEC(mchk
), IDTVEC(rsvd
), IDTVEC(fpu
), IDTVEC(align
),
675 IDTVEC(xmm
), IDTVEC(dblfault
),
676 IDTVEC(fast_syscall
), IDTVEC(fast_syscall32
);
680 ptrace_set_pc(struct lwp
*lp
, unsigned long addr
)
682 lp
->lwp_md
.md_regs
->tf_rip
= addr
;
687 ptrace_single_step(struct lwp
*lp
)
689 lp
->lwp_md
.md_regs
->tf_rflags
|= PSL_T
;
694 fill_regs(struct lwp
*lp
, struct reg
*regs
)
696 struct trapframe
*tp
;
698 if ((tp
= lp
->lwp_md
.md_regs
) == NULL
)
700 bcopy(&tp
->tf_rdi
, ®s
->r_rdi
, sizeof(*regs
));
705 set_regs(struct lwp
*lp
, struct reg
*regs
)
707 struct trapframe
*tp
;
709 tp
= lp
->lwp_md
.md_regs
;
710 if (!EFL_SECURE(regs
->r_rflags
, tp
->tf_rflags
) ||
711 !CS_SECURE(regs
->r_cs
))
713 bcopy(®s
->r_rdi
, &tp
->tf_rdi
, sizeof(*regs
));
718 fill_fpregs_xmm(struct savexmm
*sv_xmm
, struct save87
*sv_87
)
720 struct env87
*penv_87
= &sv_87
->sv_env
;
721 struct envxmm
*penv_xmm
= &sv_xmm
->sv_env
;
724 /* FPU control/status */
725 penv_87
->en_cw
= penv_xmm
->en_cw
;
726 penv_87
->en_sw
= penv_xmm
->en_sw
;
727 penv_87
->en_tw
= penv_xmm
->en_tw
;
728 penv_87
->en_fip
= penv_xmm
->en_fip
;
729 penv_87
->en_fcs
= penv_xmm
->en_fcs
;
730 penv_87
->en_opcode
= penv_xmm
->en_opcode
;
731 penv_87
->en_foo
= penv_xmm
->en_foo
;
732 penv_87
->en_fos
= penv_xmm
->en_fos
;
735 for (i
= 0; i
< 8; ++i
)
736 sv_87
->sv_ac
[i
] = sv_xmm
->sv_fp
[i
].fp_acc
;
740 set_fpregs_xmm(struct save87
*sv_87
, struct savexmm
*sv_xmm
)
742 struct env87
*penv_87
= &sv_87
->sv_env
;
743 struct envxmm
*penv_xmm
= &sv_xmm
->sv_env
;
746 /* FPU control/status */
747 penv_xmm
->en_cw
= penv_87
->en_cw
;
748 penv_xmm
->en_sw
= penv_87
->en_sw
;
749 penv_xmm
->en_tw
= penv_87
->en_tw
;
750 penv_xmm
->en_fip
= penv_87
->en_fip
;
751 penv_xmm
->en_fcs
= penv_87
->en_fcs
;
752 penv_xmm
->en_opcode
= penv_87
->en_opcode
;
753 penv_xmm
->en_foo
= penv_87
->en_foo
;
754 penv_xmm
->en_fos
= penv_87
->en_fos
;
757 for (i
= 0; i
< 8; ++i
)
758 sv_xmm
->sv_fp
[i
].fp_acc
= sv_87
->sv_ac
[i
];
762 fill_fpregs(struct lwp
*lp
, struct fpreg
*fpregs
)
764 if (lp
->lwp_thread
== NULL
|| lp
->lwp_thread
->td_pcb
== NULL
)
767 fill_fpregs_xmm(&lp
->lwp_thread
->td_pcb
->pcb_save
.sv_xmm
,
768 (struct save87
*)fpregs
);
771 bcopy(&lp
->lwp_thread
->td_pcb
->pcb_save
.sv_87
, fpregs
, sizeof *fpregs
);
776 set_fpregs(struct lwp
*lp
, struct fpreg
*fpregs
)
779 set_fpregs_xmm((struct save87
*)fpregs
,
780 &lp
->lwp_thread
->td_pcb
->pcb_save
.sv_xmm
);
783 bcopy(fpregs
, &lp
->lwp_thread
->td_pcb
->pcb_save
.sv_87
, sizeof *fpregs
);
788 fill_dbregs(struct lwp
*lp
, struct dbreg
*dbregs
)
794 set_dbregs(struct lwp
*lp
, struct dbreg
*dbregs
)
801 * Return > 0 if a hardware breakpoint has been hit, and the
802 * breakpoint was in user space. Return 0, otherwise.
805 user_dbreg_trap(void)
807 u_int32_t dr7
, dr6
; /* debug registers dr6 and dr7 */
808 u_int32_t bp
; /* breakpoint bits extracted from dr6 */
809 int nbp
; /* number of breakpoints that triggered */
810 caddr_t addr
[4]; /* breakpoint addresses */
814 if ((dr7
& 0x000000ff) == 0) {
816 * all GE and LE bits in the dr7 register are zero,
817 * thus the trap couldn't have been caused by the
818 * hardware debug registers
825 bp
= dr6
& 0x0000000f;
829 * None of the breakpoint bits are set meaning this
830 * trap was not caused by any of the debug registers
836 * at least one of the breakpoints were hit, check to see
837 * which ones and if any of them are user space addresses
841 addr
[nbp
++] = (caddr_t
)rdr0();
844 addr
[nbp
++] = (caddr_t
)rdr1();
847 addr
[nbp
++] = (caddr_t
)rdr2();
850 addr
[nbp
++] = (caddr_t
)rdr3();
853 for (i
=0; i
<nbp
; i
++) {
855 (caddr_t
)VM_MAX_USER_ADDRESS
) {
857 * addr[i] is in user space
864 * None of the breakpoints are in user space.
877 cpu_feature
= regs
[3];
/*
 * Minimal Debugger() stub: just log that it was called with the
 * given message (no actual debugger entry in this configuration).
 */
void
Debugger(const char *msg)
{
	kprintf("Debugger(\"%s\") called.\n", msg);
}