kernel - More threaded core dump fixes
[dragonfly.git] / sys / platform / pc64 / x86_64 / npx.c
/*
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * Copyright (c) 2006 The DragonFly Project.
 * Copyright (c) 2006 Matthew Dillon.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/isa/npx.c,v 1.80.2.3 2001/10/20 19:04:38 tegge Exp $
 */
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/signalvar.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/globaldata.h>
#define fldcw(addr)		__asm("fldcw %0" : : "m" (*(addr)))
#define fnclex()		__asm("fnclex")
#define fninit()		__asm("fninit")
#define fnop()			__asm("fnop")
#define fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define fnstsw(addr)		__asm __volatile("fnstsw %0" : "=m" (*(addr)))
#define frstor(addr)		__asm("frstor %0" : : "m" (*(addr)))
#define fxrstor(addr)		__asm("fxrstor %0" : : "m" (*(addr)))
#define fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
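/*
 * The .byte sequences below hand-encode xrstor and xsave with a (%rdi)
 * memory operand for assemblers that predate the mnemonics: 0x0f,0xae is
 * the opcode, and the ModRM bytes 0x2f and 0x27 select the /5 (xrstor)
 * and /4 (xsave) forms with rm=%rdi.  The edx:eax pair supplies the
 * 64-bit requested-feature bitmap that the instructions AND with XCR0.
 */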
#ifndef CPU_DISABLE_AVX
#define xrstor(eax,edx,addr)	__asm __volatile(".byte 0x0f,0xae,0x2f" : : "D" (addr), "a" (eax), "d" (edx))
#define xsave(eax,edx,addr)	__asm __volatile(".byte 0x0f,0xae,0x27" : : "D" (addr), "a" (eax), "d" (edx) : "memory")
#endif
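/*
 * start_emulating() sets CR0_TS, so the next FP instruction issued on
 * this cpu takes a device-not-available (DNA) fault; that is how npxdna()
 * below gets invoked to lazily restore FP state.  stop_emulating()
 * clears the bit again via clts.
 */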
#define start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
				      : : "n" (CR0_TS) : "ax")
#define stop_emulating()	__asm("clts")
typedef u_char bool_t;
static	void	fpu_clean_state(void);
#define ldmxcsr(csr)		__asm __volatile("ldmxcsr %0" : : "m" (csr))

static struct krate badfprate = { 1 };

static	void	fpusave		(union savefpu *);
static	void	fpurstor	(union savefpu *);

uint32_t npx_mxcsr_mask = 0xFFBF;	/* this is the default */
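/*
 * 0xFFBF allows every MXCSR bit except bit 6 (DAZ); it is the default
 * the Intel documentation specifies when fxsave reports a MXCSR_MASK
 * field of 0.
 */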
/*
 * Probe the npx_mxcsr_mask as described in the Intel document
 * "Intel processor identification and the CPUID instruction" Section 7
 * "Denormals are Zero".
 * Note that for fxsave to work reliably, the OS support bit for
 * FXSAVE/FXRSTOR operations in CR4 has to be set as per
 * Intel 64 and IA-32 Architectures Developer's Manual: Vol. 1,
 * 10.5.1.2.
 */
void npxprobemask(void)
{
	/* 64-byte alignment required for xsave */
	static union savefpu dummy __aligned(64);

	crit_enter();
	stop_emulating();
	load_cr4(rcr4() | CR4_FXSR);
	fxsave(&dummy);
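	/*
	 * Dword index 7 is byte offset 28 of the fxsave image: the
	 * MXCSR_MASK field reported by the cpu.
	 */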
	npx_mxcsr_mask = ((uint32_t *)&dummy)[7];
	start_emulating();
	crit_exit();
}
/*
 * Initialize the floating point unit.
 */
void npxinit(void)
{
	/* 64-byte alignment required for xsave */
	static union savefpu dummy __aligned(64);
	u_short control = __INITIAL_FPUCW__;
	u_int mxcsr = __INITIAL_MXCSR__;

	/*
	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
	 * the fpu and sets npxthread = NULL as important side effects.
	 */
	npxsave(&dummy);
	crit_enter();
	stop_emulating();
	fldcw(&control);
	ldmxcsr(mxcsr);
	fpusave(curthread->td_savefpu);
	mdcpu->gd_npxthread = NULL;
	start_emulating();
	crit_exit();
}
/*
 * Free coprocessor (if we have it).
 */
void
npxexit(void)
{
	if (curthread == mdcpu->gd_npxthread)
		npxsave(curthread->td_savefpu);
}
#if 0
/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 127 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 * 1  Invalid operation (FP_X_INV)
 * 1a   Stack underflow
 * 1b   Stack overflow
 * 1c   Operand of unsupported format
 * 1d   SNaN operand.
 * 2  QNaN operand (not an exception, irrelevant here)
 * 3  Any other invalid-operation not mentioned above or zero divide
 *      (FP_X_INV, FP_X_DZ)
 * 4  Denormal operand (FP_X_DNML)
 * 5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 * 6  Inexact result (FP_X_IMP)
 */
static char fpetable[128] = {
	0,		/* 0 - none */
	FPE_FLTINV,	/* 1 - INV */
	FPE_FLTUND,	/* 2 - DNML */
	FPE_FLTINV,	/* 3 - INV | DNML */
	FPE_FLTDIV,	/* 4 - DZ */
	FPE_FLTINV,	/* 5 - INV | DZ */
	FPE_FLTDIV,	/* 6 - DNML | DZ */
	FPE_FLTINV,	/* 7 - INV | DNML | DZ */
	FPE_FLTOVF,	/* 8 - OFL */
	FPE_FLTINV,	/* 9 - INV | OFL */
	FPE_FLTUND,	/* A - DNML | OFL */
	FPE_FLTINV,	/* B - INV | DNML | OFL */
	FPE_FLTDIV,	/* C - DZ | OFL */
	FPE_FLTINV,	/* D - INV | DZ | OFL */
	FPE_FLTDIV,	/* E - DNML | DZ | OFL */
	FPE_FLTINV,	/* F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};
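/*
 * A sketch of the lookup the comment above describes; the original
 * selection macro was not retained in this disabled block, so the names
 * here (GET_FPE_CODE, sw, cw) are illustrative only.  Steps 1-4 from the
 * comment map onto the expression directly: mask the status word (sw)
 * down to the six maskable exception bits, drop the ones the control
 * word (cw) masks, then OR the stack-fault bit (0x40) back in before
 * indexing the table.
 */
#define GET_FPE_CODE(sw, cw)	(fpetable[((sw) & ~(cw) & 0x3f) | \
					  ((sw) & 0x40)])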
#endif
/*
 * Implement the device not available (DNA) exception.  gd_npxthread had
 * better be NULL.  Restore the current thread's FP state and set gd_npxthread
 * to curthread.
 *
 * Interrupts are enabled and preemption can occur.  Enter a critical
 * section to stabilize the FP state.
 */
int
npxdna(void)
{
	thread_t td = curthread;
	int didinit = 0;

	if (mdcpu->gd_npxthread != NULL) {
		kprintf("npxdna: npxthread = %p, curthread = %p\n",
			mdcpu->gd_npxthread, curthread);
		panic("npxdna");
	}

	/*
	 * Setup the initial saved state if the thread has never before
	 * used the FP unit.  This also occurs when a thread pushes a
	 * signal handler and uses FP in the handler.
	 */
	crit_enter();
	if ((td->td_flags & (TDF_USINGFP | TDF_KERNELFP)) == 0) {
		td->td_flags |= TDF_USINGFP;
		npxinit();
		didinit = 1;
	}

	/*
	 * The setting of gd_npxthread and the call to fpurstor() must not
	 * be preempted by an interrupt thread or we will take an npxdna
	 * trap and potentially save our current fpstate (which is garbage)
	 * and then restore the garbage rather than the originally saved
	 * fpstate.
	 */
	stop_emulating();

	/*
	 * Record new context early in case frstor causes an IRQ13.
	 */
	mdcpu->gd_npxthread = td;

	/*
	 * The following frstor may cause an IRQ13 when the state being
	 * restored has a pending error.  The error will appear to have been
	 * triggered by the current (npx) user instruction even when that
	 * instruction is a no-wait instruction that should not trigger an
	 * error (e.g., fnclex).  On at least one 486 system all of the
	 * no-wait instructions are broken the same as frstor, so our
	 * treatment does not amplify the breakage.  On at least one
	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
	 * fnsave are broken, so our treatment breaks fnclex if it is the
	 * first FPU instruction after a context switch.
	 */
	if ((td->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~npx_mxcsr_mask) &&
	    cpu_fxsr) {
		krateprintf(&badfprate,
			    "%s: FXRSTOR: illegal FP MXCSR %08x didinit = %d\n",
			    td->td_comm, td->td_savefpu->sv_xmm.sv_env.en_mxcsr,
			    didinit);
		td->td_savefpu->sv_xmm.sv_env.en_mxcsr &= npx_mxcsr_mask;
		lwpsignal(curproc, curthread->td_lwp, SIGFPE);
	}
	fpurstor(td->td_savefpu);
	crit_exit();
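	/*
	 * Returning 1 should tell the trap code that the DNA fault was
	 * handled and the faulting FP instruction can simply be restarted
	 * now that this thread's FP state is resident.
	 */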
	return (1);
}
/*
 * Wrapper for the fnsave instruction to handle h/w bugs.  If there is an error
 * pending, then fnsave generates a bogus IRQ13 on some systems.  Force
 * any IRQ13 to be handled immediately, and then ignore it.  This routine is
 * often called at splhigh so it must not use many system services.  In
 * particular, it's much easier to install a special handler than to
 * guarantee that it's safe to use npxintr() and its supporting code.
 *
 * WARNING!  This call is made during a switch and the MP lock will be
 * setup for the new target thread rather than the current thread, so we
 * cannot do anything here that depends on the *_mplock() functions as
 * we may trip over their assertions.
 *
 * WARNING!  When using fxsave we MUST fninit after saving the FP state.  The
 * kernel will always assume that the FP state is 'safe' (will not cause
 * exceptions) for mmx/xmm use if npxthread is NULL.  The kernel must still
 * setup a custom save area before actually using the FP unit, but it will
 * not bother calling fninit.  This greatly improves kernel performance when
 * it wishes to use the FP unit.
 */
void
npxsave(union savefpu *addr)
{
	crit_enter();
	stop_emulating();
	fpusave(addr);
	mdcpu->gd_npxthread = NULL;
	fninit();
	start_emulating();
	crit_exit();
}
static void
fpusave(union savefpu *addr)
{
#ifndef CPU_DISABLE_AVX
	if (cpu_xsave)
		xsave(CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM,
		      0, addr);
	else
#endif
	if (cpu_fxsr)
		fxsave(addr);
	else
		fnsave(addr);
}
/*
 * Save the FP state to the mcontext structure.
 *
 * WARNING: If you want to try to npxsave() directly to mctx->mc_fpregs,
 * then it MUST be 16-byte aligned.  Currently this is not guaranteed.
 */
void
npxpush(mcontext_t *mctx)
{
	thread_t td = curthread;

	KKASSERT((td->td_flags & TDF_KERNELFP) == 0);

	if (td->td_flags & TDF_USINGFP) {
		if (mdcpu->gd_npxthread == td) {
			/*
			 * XXX Note: This is a bit inefficient if the signal
			 * handler uses floating point, extra faults will
			 * occur.
			 */
			mctx->mc_ownedfp = _MC_FPOWNED_FPU;
			npxsave(td->td_savefpu);
		} else {
			mctx->mc_ownedfp = _MC_FPOWNED_PCB;
		}
		KKASSERT(sizeof(*td->td_savefpu) <= sizeof(mctx->mc_fpregs));
		bcopy(td->td_savefpu, mctx->mc_fpregs, sizeof(*td->td_savefpu));
		td->td_flags &= ~TDF_USINGFP;
#ifndef CPU_DISABLE_AVX
		if (cpu_xsave)
			mctx->mc_fpformat = _MC_FPFMT_YMM;
		else
#endif
		{
			if (cpu_fxsr)
				mctx->mc_fpformat = _MC_FPFMT_XMM;
			else
				mctx->mc_fpformat = _MC_FPFMT_387;
		}
	} else {
		mctx->mc_ownedfp = _MC_FPOWNED_NONE;
		mctx->mc_fpformat = _MC_FPFMT_NODEV;
	}
}
/*
 * Restore the FP state from the mcontext structure.
 */
void
npxpop(mcontext_t *mctx)
{
	thread_t td = curthread;

	switch (mctx->mc_ownedfp) {
	case _MC_FPOWNED_NONE:
		/*
		 * If the signal handler used the FP unit but the interrupted
		 * code did not, release the FP unit.  Clearing TDF_USINGFP
		 * will force the FP unit to reinit so the interrupted code
		 * sees a clean slate.
		 */
		if (td->td_flags & TDF_USINGFP) {
			if (td == mdcpu->gd_npxthread)
				npxsave(td->td_savefpu);
			td->td_flags &= ~TDF_USINGFP;
		}
		break;
	case _MC_FPOWNED_FPU:
	case _MC_FPOWNED_PCB:
		/*
		 * Clear ownership of the FP unit and restore our saved state.
		 *
		 * NOTE: The signal handler may have set up some FP state and
		 * enabled the FP unit, so we have to restore no matter what.
		 *
		 * XXX: This is a bit inefficient, if the code being returned
		 * to is actively using the FP this results in multiple
		 * kernel faults.
		 *
		 * WARNING: The saved state was exposed to userland and may
		 * have to be sanitized to avoid a GP fault in the kernel.
		 */
		if (td == mdcpu->gd_npxthread)
			npxsave(td->td_savefpu);
		KKASSERT(sizeof(*td->td_savefpu) <= sizeof(mctx->mc_fpregs));
		bcopy(mctx->mc_fpregs, td->td_savefpu, sizeof(*td->td_savefpu));
		if ((td->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~npx_mxcsr_mask) &&
		    cpu_fxsr) {
			krateprintf(&badfprate,
				    "pid %d (%s) signal return from user: "
				    "illegal FP MXCSR %08x\n",
				    td->td_proc->p_pid,
				    td->td_proc->p_comm,
				    td->td_savefpu->sv_xmm.sv_env.en_mxcsr);
		}
		td->td_flags |= TDF_USINGFP;
		break;
	}
}
/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static double dummy_variable = 0.0;

static void
fpu_clean_state(void)
{
	u_short status;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */
	fnstsw(&status);
	if (status & 0x80)
		fnclex();

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 * fxrstor() anyway.
	 */
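	/*
	 * Note: flds pushes a register, and the push lands in the physical
	 * slot that is %st(7) before the push, so freeing %st(7) first
	 * should guarantee the load cannot raise a stack-overflow fault
	 * even if all eight stack slots were in use.
	 */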
	__asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
}
static void
fpurstor(union savefpu *addr)
{
#ifndef CPU_DISABLE_AVX
	if (cpu_xsave)
		xrstor(CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM,
		       0, addr);
	else
#endif
	if (cpu_fxsr) {
		fpu_clean_state();
		fxrstor(addr);
	} else {
		frstor(addr);
	}
}