/*
 * 9597 Want hypervisor API for FPU management
 * Source: usr/src/uts/i86pc/os/hma_fpu.c (unleashed.git)
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 */
/*
 * This implements the hypervisor multiplexor FPU API. Its purpose is to make it
 * easy to switch between the host and guest hypervisor while hiding all the
 * details about CR0.TS and how to save the host's state as required.
 */
22 #include <sys/pcb.h>
23 #include <sys/kmem.h>
24 #include <sys/debug.h>
25 #include <sys/cmn_err.h>
26 #include <sys/ddi.h>
27 #include <sys/sunddi.h>
28 #include <sys/hma.h>
29 #include <sys/x86_archext.h>
30 #include <sys/archsystm.h>
32 struct hma_fpu {
33 fpu_ctx_t hf_guest_fpu;
34 kthread_t *hf_curthread;
35 boolean_t hf_inguest;
38 int
39 hma_fpu_init(hma_fpu_t *fpu)
41 struct xsave_state *xs;
43 ASSERT0(fpu->hf_inguest);
45 switch (fp_save_mech) {
46 case FP_FXSAVE:
47 bcopy(&sse_initial, fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_fx,
48 sizeof (struct fxsave_state));
49 fpu->hf_guest_fpu.fpu_xsave_mask = 0;
50 break;
51 case FP_XSAVE:
53 * Zero everything in the xsave case as we may have data in
54 * the structure that's not part of the initial value (which
55 * only really deals with a small portion of the xsave state).
57 xs = fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_xs;
58 bzero(xs, cpuid_get_xsave_size());
59 bcopy(&avx_initial, xs, sizeof (*xs));
60 xs->xs_xstate_bv = XFEATURE_LEGACY_FP | XFEATURE_SSE;
61 fpu->hf_guest_fpu.fpu_xsave_mask = XFEATURE_FP_ALL;
62 break;
63 default:
64 panic("Invalid fp_save_mech");
67 fpu->hf_guest_fpu.fpu_flags = FPU_EN | FPU_VALID;
69 return (0);
72 void
73 hma_fpu_free(hma_fpu_t *fpu)
75 if (fpu == NULL)
76 return;
78 ASSERT3P(fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_generic, !=, NULL);
79 kmem_cache_free(fpsave_cachep,
80 fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_generic);
81 kmem_free(fpu, sizeof (*fpu));
84 hma_fpu_t *
85 hma_fpu_alloc(int kmflag)
87 hma_fpu_t *fpu;
89 fpu = kmem_zalloc(sizeof (hma_fpu_t), kmflag);
90 if (fpu == NULL)
91 return (NULL);
93 fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_generic =
94 kmem_cache_alloc(fpsave_cachep, kmflag);
95 if (fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_generic == NULL) {
96 kmem_free(fpu, sizeof (hma_fpu_t));
97 return (NULL);
99 fpu->hf_inguest = B_FALSE;
102 * Make sure the entire structure is zero.
104 switch (fp_save_mech) {
105 case FP_FXSAVE:
106 bzero(fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_generic,
107 sizeof (struct fxsave_state));
108 break;
109 case FP_XSAVE:
110 bzero(fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_generic,
111 cpuid_get_xsave_size());
112 break;
113 default:
114 panic("Invalid fp_save_mech");
117 return (fpu);
120 void
121 hma_fpu_start_guest(hma_fpu_t *fpu)
124 * Note, we don't check / assert whether or not t_prempt is true because
125 * there are contexts where this is safe to call (from a context op)
126 * where t_preempt may not be set.
128 ASSERT3S(fpu->hf_inguest, ==, B_FALSE);
129 ASSERT3P(fpu->hf_curthread, ==, NULL);
130 ASSERT3P(curthread->t_lwp, !=, NULL);
131 ASSERT3U(fpu->hf_guest_fpu.fpu_flags & FPU_EN, !=, 0);
132 ASSERT3U(fpu->hf_guest_fpu.fpu_flags & FPU_VALID, !=, 0);
134 fpu->hf_inguest = B_TRUE;
135 fpu->hf_curthread = curthread;
138 fp_save(&curthread->t_lwp->lwp_pcb.pcb_fpu);
139 fp_restore(&fpu->hf_guest_fpu);
140 fpu->hf_guest_fpu.fpu_flags &= ~FPU_VALID;
143 void
144 hma_fpu_stop_guest(hma_fpu_t *fpu)
146 ASSERT3S(fpu->hf_inguest, ==, B_TRUE);
147 ASSERT3P(fpu->hf_curthread, ==, curthread);
148 ASSERT3U(fpu->hf_guest_fpu.fpu_flags & FPU_EN, !=, 0);
149 ASSERT3U(fpu->hf_guest_fpu.fpu_flags & FPU_VALID, ==, 0);
152 * Note, we can't use fp_save because it assumes that we're saving to
153 * the thread's PCB and not somewhere else. Because this is a different
154 * FPU context, we instead have to do this ourselves.
156 switch (fp_save_mech) {
157 case FP_FXSAVE:
158 fpxsave(fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_fx);
159 break;
160 case FP_XSAVE:
161 xsavep(fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_xs,
162 fpu->hf_guest_fpu.fpu_xsave_mask);
163 break;
164 default:
165 panic("Invalid fp_save_mech");
166 /*NOTREACHED*/
168 fpu->hf_guest_fpu.fpu_flags |= FPU_VALID;
170 fp_restore(&curthread->t_lwp->lwp_pcb.pcb_fpu);
172 fpu->hf_inguest = B_FALSE;
173 fpu->hf_curthread = NULL;
176 void
177 hma_fpu_get_fxsave_state(const hma_fpu_t *fpu, struct fxsave_state *fx)
179 const struct fxsave_state *guest;
181 ASSERT3S(fpu->hf_inguest, ==, B_FALSE);
183 guest = fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_fx;
184 bcopy(guest, fx, sizeof (*fx));
188 hma_fpu_set_fxsave_state(hma_fpu_t *fpu, const struct fxsave_state *fx)
190 struct fxsave_state *gfx;
191 struct xsave_state *gxs;
193 ASSERT3S(fpu->hf_inguest, ==, B_FALSE);
196 * If reserved bits are set in fx_mxcsr, then we will take a #GP when
197 * we restore them. Reject this outright.
199 * We do not need to check if we are dealing with state that has pending
200 * exceptions. This was only the case with the original FPU save and
201 * restore mechanisms (fsave/frstor). When using fxsave/fxrstor and
202 * xsave/xrstor they will be deferred to the user using the FPU, which
203 * is what we'd want here (they'd be used in guest context).
205 if ((fx->fx_mxcsr & ~sse_mxcsr_mask) != 0)
206 return (EINVAL);
208 switch (fp_save_mech) {
209 case FP_FXSAVE:
210 gfx = fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_fx;
211 bcopy(fx, gfx, sizeof (*fx));
212 break;
213 case FP_XSAVE:
214 gxs = fpu->hf_guest_fpu.fpu_regs.kfpu_u.kfpu_xs;
215 bzero(gxs, cpuid_get_xsave_size());
216 bcopy(fx, &gxs->xs_fxsave, sizeof (*fx));
217 gxs->xs_xstate_bv = XFEATURE_LEGACY_FP | XFEATURE_SSE;
218 break;
219 default:
220 panic("Invalid fp_save_mech");
221 /* NOTREACHED */
224 return (0);