2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
13 * Copyright (c) 2018, Joyent, Inc.
17 * This implements the hypervisor multiplexor FPU API. Its purpose is to make it
18 * easy to switch between the host and guest hypervisor while hiding all the
19 * details about CR0.TS and how to save the host's state as required.
24 #include <sys/debug.h>
25 #include <sys/cmn_err.h>
27 #include <sys/sunddi.h>
29 #include <sys/x86_archext.h>
30 #include <sys/archsystm.h>
33 fpu_ctx_t hf_guest_fpu
;
34 kthread_t
*hf_curthread
;
39 hma_fpu_init(hma_fpu_t
*fpu
)
41 struct xsave_state
*xs
;
43 ASSERT0(fpu
->hf_inguest
);
45 switch (fp_save_mech
) {
47 bcopy(&sse_initial
, fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_fx
,
48 sizeof (struct fxsave_state
));
49 fpu
->hf_guest_fpu
.fpu_xsave_mask
= 0;
53 * Zero everything in the xsave case as we may have data in
54 * the structure that's not part of the initial value (which
55 * only really deals with a small portion of the xsave state).
57 xs
= fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_xs
;
58 bzero(xs
, cpuid_get_xsave_size());
59 bcopy(&avx_initial
, xs
, sizeof (*xs
));
60 xs
->xs_xstate_bv
= XFEATURE_LEGACY_FP
| XFEATURE_SSE
;
61 fpu
->hf_guest_fpu
.fpu_xsave_mask
= XFEATURE_FP_ALL
;
64 panic("Invalid fp_save_mech");
67 fpu
->hf_guest_fpu
.fpu_flags
= FPU_EN
| FPU_VALID
;
73 hma_fpu_free(hma_fpu_t
*fpu
)
78 ASSERT3P(fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_generic
, !=, NULL
);
79 kmem_cache_free(fpsave_cachep
,
80 fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_generic
);
81 kmem_free(fpu
, sizeof (*fpu
));
85 hma_fpu_alloc(int kmflag
)
89 fpu
= kmem_zalloc(sizeof (hma_fpu_t
), kmflag
);
93 fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_generic
=
94 kmem_cache_alloc(fpsave_cachep
, kmflag
);
95 if (fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_generic
== NULL
) {
96 kmem_free(fpu
, sizeof (hma_fpu_t
));
99 fpu
->hf_inguest
= B_FALSE
;
102 * Make sure the entire structure is zero.
104 switch (fp_save_mech
) {
106 bzero(fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_generic
,
107 sizeof (struct fxsave_state
));
110 bzero(fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_generic
,
111 cpuid_get_xsave_size());
114 panic("Invalid fp_save_mech");
121 hma_fpu_start_guest(hma_fpu_t
*fpu
)
124 * Note, we don't check / assert whether or not t_prempt is true because
125 * there are contexts where this is safe to call (from a context op)
126 * where t_preempt may not be set.
128 ASSERT3S(fpu
->hf_inguest
, ==, B_FALSE
);
129 ASSERT3P(fpu
->hf_curthread
, ==, NULL
);
130 ASSERT3P(curthread
->t_lwp
, !=, NULL
);
131 ASSERT3U(fpu
->hf_guest_fpu
.fpu_flags
& FPU_EN
, !=, 0);
132 ASSERT3U(fpu
->hf_guest_fpu
.fpu_flags
& FPU_VALID
, !=, 0);
134 fpu
->hf_inguest
= B_TRUE
;
135 fpu
->hf_curthread
= curthread
;
138 fp_save(&curthread
->t_lwp
->lwp_pcb
.pcb_fpu
);
139 fp_restore(&fpu
->hf_guest_fpu
);
140 fpu
->hf_guest_fpu
.fpu_flags
&= ~FPU_VALID
;
144 hma_fpu_stop_guest(hma_fpu_t
*fpu
)
146 ASSERT3S(fpu
->hf_inguest
, ==, B_TRUE
);
147 ASSERT3P(fpu
->hf_curthread
, ==, curthread
);
148 ASSERT3U(fpu
->hf_guest_fpu
.fpu_flags
& FPU_EN
, !=, 0);
149 ASSERT3U(fpu
->hf_guest_fpu
.fpu_flags
& FPU_VALID
, ==, 0);
152 * Note, we can't use fp_save because it assumes that we're saving to
153 * the thread's PCB and not somewhere else. Because this is a different
154 * FPU context, we instead have to do this ourselves.
156 switch (fp_save_mech
) {
158 fpxsave(fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_fx
);
161 xsavep(fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_xs
,
162 fpu
->hf_guest_fpu
.fpu_xsave_mask
);
165 panic("Invalid fp_save_mech");
168 fpu
->hf_guest_fpu
.fpu_flags
|= FPU_VALID
;
170 fp_restore(&curthread
->t_lwp
->lwp_pcb
.pcb_fpu
);
172 fpu
->hf_inguest
= B_FALSE
;
173 fpu
->hf_curthread
= NULL
;
177 hma_fpu_get_fxsave_state(const hma_fpu_t
*fpu
, struct fxsave_state
*fx
)
179 const struct fxsave_state
*guest
;
181 ASSERT3S(fpu
->hf_inguest
, ==, B_FALSE
);
183 guest
= fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_fx
;
184 bcopy(guest
, fx
, sizeof (*fx
));
188 hma_fpu_set_fxsave_state(hma_fpu_t
*fpu
, const struct fxsave_state
*fx
)
190 struct fxsave_state
*gfx
;
191 struct xsave_state
*gxs
;
193 ASSERT3S(fpu
->hf_inguest
, ==, B_FALSE
);
196 * If reserved bits are set in fx_mxcsr, then we will take a #GP when
197 * we restore them. Reject this outright.
199 * We do not need to check if we are dealing with state that has pending
200 * exceptions. This was only the case with the original FPU save and
201 * restore mechanisms (fsave/frstor). When using fxsave/fxrstor and
202 * xsave/xrstor they will be deferred to the user using the FPU, which
203 * is what we'd want here (they'd be used in guest context).
205 if ((fx
->fx_mxcsr
& ~sse_mxcsr_mask
) != 0)
208 switch (fp_save_mech
) {
210 gfx
= fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_fx
;
211 bcopy(fx
, gfx
, sizeof (*fx
));
214 gxs
= fpu
->hf_guest_fpu
.fpu_regs
.kfpu_u
.kfpu_xs
;
215 bzero(gxs
, cpuid_get_xsave_size());
216 bcopy(fx
, &gxs
->xs_fxsave
, sizeof (*fx
));
217 gxs
->xs_xstate_bv
= XFEATURE_LEGACY_FP
| XFEATURE_SSE
;
220 panic("Invalid fp_save_mech");