/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
8 #include <linux/module.h>
9 #include <linux/regset.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
13 #include <asm/sigcontext.h>
14 #include <asm/processor.h>
15 #include <asm/math_emu.h>
16 #include <asm/uaccess.h>
17 #include <asm/ptrace.h>
19 #include <asm/fpu-internal.h>
/*
 * On 64-bit the ia32-compat signal/ptrace paths need the 32-bit layout
 * headers; on 32-bit the "_ia32" names are simply aliases for the
 * native symbols so the code below can be shared.
 */
#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
#else
# define save_i387_xstate_ia32		save_i387_xstate
# define restore_i387_xstate_ia32	restore_i387_xstate
# define _fpstate_ia32			_fpstate
# define _xstate_ia32			_xstate
# define sig_xstate_ia32_size		sig_xstate_size
# define fx_sw_reserved_ia32		fx_sw_reserved
# define user_i387_ia32_struct		user_i387_struct
# define user32_fxsr_struct		user_fxsr_struct
#endif
37 * Were we in an interrupt that interrupted kernel mode?
39 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
40 * pair does nothing at all: the thread must not have fpu (so
41 * that we don't try to save the FPU state), and TS must
42 * be set (so that the clts/stts pair does nothing that is
43 * visible in the interrupted kernel thread).
45 static inline bool interrupted_kernel_fpu_idle(void)
47 return !__thread_has_fpu(current
) &&
48 (read_cr0() & X86_CR0_TS
);
52 * Were we in user mode (or vm86 mode) when we were
55 * Doing kernel_fpu_begin/end() is ok if we are running
56 * in an interrupt context from user mode - we'll just
57 * save the FPU state as required.
59 static inline bool interrupted_user_mode(void)
61 struct pt_regs
*regs
= get_irq_regs();
62 return regs
&& user_mode_vm(regs
);
66 * Can we use the FPU in kernel mode with the
67 * whole "kernel_fpu_begin/end()" sequence?
69 * It's always ok in process context (ie "not interrupt")
70 * but it is sometimes ok even from an irq.
72 bool irq_fpu_usable(void)
74 return !in_interrupt() ||
75 interrupted_user_mode() ||
76 interrupted_kernel_fpu_idle();
78 EXPORT_SYMBOL(irq_fpu_usable
);
80 void kernel_fpu_begin(void)
82 struct task_struct
*me
= current
;
84 WARN_ON_ONCE(!irq_fpu_usable());
86 if (__thread_has_fpu(me
)) {
88 __thread_clear_has_fpu(me
);
89 /* We do 'stts()' in kernel_fpu_end() */
91 this_cpu_write(fpu_owner_task
, NULL
);
95 EXPORT_SYMBOL(kernel_fpu_begin
);
/*
 * Release the FPU after kernel_fpu_begin(): re-arm the lazy-restore
 * trap (TS) and re-enable preemption.
 */
void kernel_fpu_end(void)
{
	stts();
	preempt_enable();
}
EXPORT_SYMBOL(kernel_fpu_end);
104 void unlazy_fpu(struct task_struct
*tsk
)
107 if (__thread_has_fpu(tsk
)) {
108 __save_init_fpu(tsk
);
109 __thread_fpu_end(tsk
);
111 tsk
->fpu_counter
= 0;
114 EXPORT_SYMBOL(unlazy_fpu
);
116 #ifdef CONFIG_MATH_EMULATION
117 # define HAVE_HWFP (boot_cpu_data.hard_math)
122 static unsigned int mxcsr_feature_mask __read_mostly
= 0xffffffffu
;
123 unsigned int xstate_size
;
124 EXPORT_SYMBOL_GPL(xstate_size
);
125 unsigned int sig_xstate_ia32_size
= sizeof(struct _fpstate_ia32
);
126 static struct i387_fxsave_struct fx_scratch __cpuinitdata
;
128 static void __cpuinit
mxcsr_feature_mask_init(void)
130 unsigned long mask
= 0;
134 memset(&fx_scratch
, 0, sizeof(struct i387_fxsave_struct
));
135 asm volatile("fxsave %0" : : "m" (fx_scratch
));
136 mask
= fx_scratch
.mxcsr_mask
;
140 mxcsr_feature_mask
&= mask
;
144 static void __cpuinit
init_thread_xstate(void)
147 * Note that xstate_size might be overwriten later during
153 * Disable xsave as we do not support it if i387
154 * emulation is enabled.
156 setup_clear_cpu_cap(X86_FEATURE_XSAVE
);
157 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT
);
158 xstate_size
= sizeof(struct i387_soft_struct
);
163 xstate_size
= sizeof(struct i387_fxsave_struct
);
165 xstate_size
= sizeof(struct i387_fsave_struct
);
169 * Called at bootup to set up the initial FPU state that is later cloned
170 * into all processes.
173 void __cpuinit
fpu_init(void)
176 unsigned long cr4_mask
= 0;
179 cr4_mask
|= X86_CR4_OSFXSR
;
181 cr4_mask
|= X86_CR4_OSXMMEXCPT
;
183 set_in_cr4(cr4_mask
);
186 cr0
&= ~(X86_CR0_TS
|X86_CR0_EM
); /* clear TS and EM */
191 if (!smp_processor_id())
192 init_thread_xstate();
194 mxcsr_feature_mask_init();
195 /* clean state in init */
196 current_thread_info()->status
= 0;
200 void fpu_finit(struct fpu
*fpu
)
203 finit_soft_fpu(&fpu
->state
->soft
);
208 struct i387_fxsave_struct
*fx
= &fpu
->state
->fxsave
;
210 memset(fx
, 0, xstate_size
);
213 fx
->mxcsr
= MXCSR_DEFAULT
;
215 struct i387_fsave_struct
*fp
= &fpu
->state
->fsave
;
216 memset(fp
, 0, xstate_size
);
217 fp
->cwd
= 0xffff037fu
;
218 fp
->swd
= 0xffff0000u
;
219 fp
->twd
= 0xffffffffu
;
220 fp
->fos
= 0xffff0000u
;
223 EXPORT_SYMBOL_GPL(fpu_finit
);
226 * The _current_ task is using the FPU for the first time
227 * so initialize it and set the mxcsr to its default
228 * value at reset if we support XMM instructions and then
229 * remember the current task has used the FPU.
231 int init_fpu(struct task_struct
*tsk
)
235 if (tsk_used_math(tsk
)) {
236 if (HAVE_HWFP
&& tsk
== current
)
238 tsk
->thread
.fpu
.last_cpu
= ~0;
243 * Memory allocation at the first usage of the FPU and other state.
245 ret
= fpu_alloc(&tsk
->thread
.fpu
);
249 fpu_finit(&tsk
->thread
.fpu
);
251 set_stopped_child_used_math(tsk
);
254 EXPORT_SYMBOL_GPL(init_fpu
);
257 * The xstateregs_active() routine is the same as the fpregs_active() routine,
258 * as the "regset->n" for the xstate regset will be updated based on the feature
259 * capabilites supported by the xsave.
261 int fpregs_active(struct task_struct
*target
, const struct user_regset
*regset
)
263 return tsk_used_math(target
) ? regset
->n
: 0;
266 int xfpregs_active(struct task_struct
*target
, const struct user_regset
*regset
)
268 return (cpu_has_fxsr
&& tsk_used_math(target
)) ? regset
->n
: 0;
271 int xfpregs_get(struct task_struct
*target
, const struct user_regset
*regset
,
272 unsigned int pos
, unsigned int count
,
273 void *kbuf
, void __user
*ubuf
)
280 ret
= init_fpu(target
);
284 sanitize_i387_state(target
);
286 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
287 &target
->thread
.fpu
.state
->fxsave
, 0, -1);
290 int xfpregs_set(struct task_struct
*target
, const struct user_regset
*regset
,
291 unsigned int pos
, unsigned int count
,
292 const void *kbuf
, const void __user
*ubuf
)
299 ret
= init_fpu(target
);
303 sanitize_i387_state(target
);
305 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
306 &target
->thread
.fpu
.state
->fxsave
, 0, -1);
309 * mxcsr reserved bits must be masked to zero for security reasons.
311 target
->thread
.fpu
.state
->fxsave
.mxcsr
&= mxcsr_feature_mask
;
314 * update the header bits in the xsave header, indicating the
315 * presence of FP and SSE state.
318 target
->thread
.fpu
.state
->xsave
.xsave_hdr
.xstate_bv
|= XSTATE_FPSSE
;
323 int xstateregs_get(struct task_struct
*target
, const struct user_regset
*regset
,
324 unsigned int pos
, unsigned int count
,
325 void *kbuf
, void __user
*ubuf
)
332 ret
= init_fpu(target
);
337 * Copy the 48bytes defined by the software first into the xstate
338 * memory layout in the thread struct, so that we can copy the entire
339 * xstateregs to the user using one user_regset_copyout().
341 memcpy(&target
->thread
.fpu
.state
->fxsave
.sw_reserved
,
342 xstate_fx_sw_bytes
, sizeof(xstate_fx_sw_bytes
));
345 * Copy the xstate memory layout.
347 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
348 &target
->thread
.fpu
.state
->xsave
, 0, -1);
352 int xstateregs_set(struct task_struct
*target
, const struct user_regset
*regset
,
353 unsigned int pos
, unsigned int count
,
354 const void *kbuf
, const void __user
*ubuf
)
357 struct xsave_hdr_struct
*xsave_hdr
;
362 ret
= init_fpu(target
);
366 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
367 &target
->thread
.fpu
.state
->xsave
, 0, -1);
370 * mxcsr reserved bits must be masked to zero for security reasons.
372 target
->thread
.fpu
.state
->fxsave
.mxcsr
&= mxcsr_feature_mask
;
374 xsave_hdr
= &target
->thread
.fpu
.state
->xsave
.xsave_hdr
;
376 xsave_hdr
->xstate_bv
&= pcntxt_mask
;
378 * These bits must be zero.
380 xsave_hdr
->reserved1
[0] = xsave_hdr
->reserved1
[1] = 0;
385 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
 * FPU tag word conversions.
 */

/*
 * Compress the 16-bit i387 tag word (2 bits per register, 11 = empty)
 * into the 8-bit FXSR tag byte (1 bit per register, 1 = non-empty).
 */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
406 #define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16)
407 #define FP_EXP_TAG_VALID 0
408 #define FP_EXP_TAG_ZERO 1
409 #define FP_EXP_TAG_SPECIAL 2
410 #define FP_EXP_TAG_EMPTY 3
412 static inline u32
twd_fxsr_to_i387(struct i387_fxsave_struct
*fxsave
)
415 u32 tos
= (fxsave
->swd
>> 11) & 7;
416 u32 twd
= (unsigned long) fxsave
->twd
;
418 u32 ret
= 0xffff0000u
;
421 for (i
= 0; i
< 8; i
++, twd
>>= 1) {
423 st
= FPREG_ADDR(fxsave
, (i
- tos
) & 7);
425 switch (st
->exponent
& 0x7fff) {
427 tag
= FP_EXP_TAG_SPECIAL
;
430 if (!st
->significand
[0] &&
431 !st
->significand
[1] &&
432 !st
->significand
[2] &&
434 tag
= FP_EXP_TAG_ZERO
;
436 tag
= FP_EXP_TAG_SPECIAL
;
439 if (st
->significand
[3] & 0x8000)
440 tag
= FP_EXP_TAG_VALID
;
442 tag
= FP_EXP_TAG_SPECIAL
;
446 tag
= FP_EXP_TAG_EMPTY
;
448 ret
|= tag
<< (2 * i
);
454 * FXSR floating point environment conversions.
458 convert_from_fxsr(struct user_i387_ia32_struct
*env
, struct task_struct
*tsk
)
460 struct i387_fxsave_struct
*fxsave
= &tsk
->thread
.fpu
.state
->fxsave
;
461 struct _fpreg
*to
= (struct _fpreg
*) &env
->st_space
[0];
462 struct _fpxreg
*from
= (struct _fpxreg
*) &fxsave
->st_space
[0];
465 env
->cwd
= fxsave
->cwd
| 0xffff0000u
;
466 env
->swd
= fxsave
->swd
| 0xffff0000u
;
467 env
->twd
= twd_fxsr_to_i387(fxsave
);
470 env
->fip
= fxsave
->rip
;
471 env
->foo
= fxsave
->rdp
;
473 * should be actually ds/cs at fpu exception time, but
474 * that information is not available in 64bit mode.
476 env
->fcs
= task_pt_regs(tsk
)->cs
;
477 if (tsk
== current
) {
478 savesegment(ds
, env
->fos
);
480 env
->fos
= tsk
->thread
.ds
;
482 env
->fos
|= 0xffff0000;
484 env
->fip
= fxsave
->fip
;
485 env
->fcs
= (u16
) fxsave
->fcs
| ((u32
) fxsave
->fop
<< 16);
486 env
->foo
= fxsave
->foo
;
487 env
->fos
= fxsave
->fos
;
490 for (i
= 0; i
< 8; ++i
)
491 memcpy(&to
[i
], &from
[i
], sizeof(to
[0]));
494 static void convert_to_fxsr(struct task_struct
*tsk
,
495 const struct user_i387_ia32_struct
*env
)
498 struct i387_fxsave_struct
*fxsave
= &tsk
->thread
.fpu
.state
->fxsave
;
499 struct _fpreg
*from
= (struct _fpreg
*) &env
->st_space
[0];
500 struct _fpxreg
*to
= (struct _fpxreg
*) &fxsave
->st_space
[0];
503 fxsave
->cwd
= env
->cwd
;
504 fxsave
->swd
= env
->swd
;
505 fxsave
->twd
= twd_i387_to_fxsr(env
->twd
);
506 fxsave
->fop
= (u16
) ((u32
) env
->fcs
>> 16);
508 fxsave
->rip
= env
->fip
;
509 fxsave
->rdp
= env
->foo
;
510 /* cs and ds ignored */
512 fxsave
->fip
= env
->fip
;
513 fxsave
->fcs
= (env
->fcs
& 0xffff);
514 fxsave
->foo
= env
->foo
;
515 fxsave
->fos
= env
->fos
;
518 for (i
= 0; i
< 8; ++i
)
519 memcpy(&to
[i
], &from
[i
], sizeof(from
[0]));
522 int fpregs_get(struct task_struct
*target
, const struct user_regset
*regset
,
523 unsigned int pos
, unsigned int count
,
524 void *kbuf
, void __user
*ubuf
)
526 struct user_i387_ia32_struct env
;
529 ret
= init_fpu(target
);
534 return fpregs_soft_get(target
, regset
, pos
, count
, kbuf
, ubuf
);
537 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
538 &target
->thread
.fpu
.state
->fsave
, 0,
542 sanitize_i387_state(target
);
544 if (kbuf
&& pos
== 0 && count
== sizeof(env
)) {
545 convert_from_fxsr(kbuf
, target
);
549 convert_from_fxsr(&env
, target
);
551 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &env
, 0, -1);
554 int fpregs_set(struct task_struct
*target
, const struct user_regset
*regset
,
555 unsigned int pos
, unsigned int count
,
556 const void *kbuf
, const void __user
*ubuf
)
558 struct user_i387_ia32_struct env
;
561 ret
= init_fpu(target
);
565 sanitize_i387_state(target
);
568 return fpregs_soft_set(target
, regset
, pos
, count
, kbuf
, ubuf
);
571 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
572 &target
->thread
.fpu
.state
->fsave
, 0, -1);
575 if (pos
> 0 || count
< sizeof(env
))
576 convert_from_fxsr(&env
, target
);
578 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &env
, 0, -1);
580 convert_to_fxsr(target
, &env
);
583 * update the header bit in the xsave header, indicating the
587 target
->thread
.fpu
.state
->xsave
.xsave_hdr
.xstate_bv
|= XSTATE_FP
;
592 * Signal frame handlers.
595 static inline int save_i387_fsave(struct _fpstate_ia32 __user
*buf
)
597 struct task_struct
*tsk
= current
;
598 struct i387_fsave_struct
*fp
= &tsk
->thread
.fpu
.state
->fsave
;
600 fp
->status
= fp
->swd
;
601 if (__copy_to_user(buf
, fp
, sizeof(struct i387_fsave_struct
)))
606 static int save_i387_fxsave(struct _fpstate_ia32 __user
*buf
)
608 struct task_struct
*tsk
= current
;
609 struct i387_fxsave_struct
*fx
= &tsk
->thread
.fpu
.state
->fxsave
;
610 struct user_i387_ia32_struct env
;
613 convert_from_fxsr(&env
, tsk
);
614 if (__copy_to_user(buf
, &env
, sizeof(env
)))
617 err
|= __put_user(fx
->swd
, &buf
->status
);
618 err
|= __put_user(X86_FXSR_MAGIC
, &buf
->magic
);
622 if (__copy_to_user(&buf
->_fxsr_env
[0], fx
, xstate_size
))
627 static int save_i387_xsave(void __user
*buf
)
629 struct task_struct
*tsk
= current
;
630 struct _fpstate_ia32 __user
*fx
= buf
;
634 sanitize_i387_state(tsk
);
637 * For legacy compatible, we always set FP/SSE bits in the bit
638 * vector while saving the state to the user context.
639 * This will enable us capturing any changes(during sigreturn) to
640 * the FP/SSE bits by the legacy applications which don't touch
641 * xstate_bv in the xsave header.
643 * xsave aware applications can change the xstate_bv in the xsave
644 * header as well as change any contents in the memory layout.
645 * xrestore as part of sigreturn will capture all the changes.
647 tsk
->thread
.fpu
.state
->xsave
.xsave_hdr
.xstate_bv
|= XSTATE_FPSSE
;
649 if (save_i387_fxsave(fx
) < 0)
652 err
= __copy_to_user(&fx
->sw_reserved
, &fx_sw_reserved_ia32
,
653 sizeof(struct _fpx_sw_bytes
));
654 err
|= __put_user(FP_XSTATE_MAGIC2
,
655 (__u32 __user
*) (buf
+ sig_xstate_ia32_size
656 - FP_XSTATE_MAGIC2_SIZE
));
663 int save_i387_xstate_ia32(void __user
*buf
)
665 struct _fpstate_ia32 __user
*fp
= (struct _fpstate_ia32 __user
*) buf
;
666 struct task_struct
*tsk
= current
;
671 if (!access_ok(VERIFY_WRITE
, buf
, sig_xstate_ia32_size
))
674 * This will cause a "finit" to be triggered by the next
675 * attempted FPU operation by the 'current' process.
680 return fpregs_soft_get(current
, NULL
,
681 0, sizeof(struct user_i387_ia32_struct
),
688 return save_i387_xsave(fp
);
690 return save_i387_fxsave(fp
);
692 return save_i387_fsave(fp
);
695 static inline int restore_i387_fsave(struct _fpstate_ia32 __user
*buf
)
697 struct task_struct
*tsk
= current
;
699 return __copy_from_user(&tsk
->thread
.fpu
.state
->fsave
, buf
,
700 sizeof(struct i387_fsave_struct
));
703 static int restore_i387_fxsave(struct _fpstate_ia32 __user
*buf
,
706 struct task_struct
*tsk
= current
;
707 struct user_i387_ia32_struct env
;
710 err
= __copy_from_user(&tsk
->thread
.fpu
.state
->fxsave
, &buf
->_fxsr_env
[0],
712 /* mxcsr reserved bits must be masked to zero for security reasons */
713 tsk
->thread
.fpu
.state
->fxsave
.mxcsr
&= mxcsr_feature_mask
;
714 if (err
|| __copy_from_user(&env
, buf
, sizeof(env
)))
716 convert_to_fxsr(tsk
, &env
);
721 static int restore_i387_xsave(void __user
*buf
)
723 struct _fpx_sw_bytes fx_sw_user
;
724 struct _fpstate_ia32 __user
*fx_user
=
725 ((struct _fpstate_ia32 __user
*) buf
);
726 struct i387_fxsave_struct __user
*fx
=
727 (struct i387_fxsave_struct __user
*) &fx_user
->_fxsr_env
[0];
728 struct xsave_hdr_struct
*xsave_hdr
=
729 ¤t
->thread
.fpu
.state
->xsave
.xsave_hdr
;
733 if (check_for_xstate(fx
, buf
, &fx_sw_user
))
736 mask
= fx_sw_user
.xstate_bv
;
738 err
= restore_i387_fxsave(buf
, fx_sw_user
.xstate_size
);
740 xsave_hdr
->xstate_bv
&= pcntxt_mask
;
742 * These bits must be zero.
744 xsave_hdr
->reserved1
[0] = xsave_hdr
->reserved1
[1] = 0;
747 * Init the state that is not present in the memory layout
748 * and enabled by the OS.
750 mask
= ~(pcntxt_mask
& ~mask
);
751 xsave_hdr
->xstate_bv
&= mask
;
756 * Couldn't find the extended state information in the memory
757 * layout. Restore the FP/SSE and init the other extended state
760 xsave_hdr
->xstate_bv
= XSTATE_FPSSE
;
761 return restore_i387_fxsave(buf
, sizeof(struct i387_fxsave_struct
));
764 int restore_i387_xstate_ia32(void __user
*buf
)
767 struct task_struct
*tsk
= current
;
768 struct _fpstate_ia32 __user
*fp
= (struct _fpstate_ia32 __user
*) buf
;
781 if (!access_ok(VERIFY_READ
, buf
, sig_xstate_ia32_size
))
792 err
= restore_i387_xsave(buf
);
793 else if (cpu_has_fxsr
)
794 err
= restore_i387_fxsave(fp
, sizeof(struct
795 i387_fxsave_struct
));
797 err
= restore_i387_fsave(fp
);
799 err
= fpregs_soft_set(current
, NULL
,
800 0, sizeof(struct user_i387_ia32_struct
),
809 * FPU state for core dumps.
810 * This is only used for a.out dumps now.
811 * It is declared generically using elf_fpregset_t (which is
812 * struct user_i387_struct) but is in fact only used for 32-bit
813 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
815 int dump_fpu(struct pt_regs
*regs
, struct user_i387_struct
*fpu
)
817 struct task_struct
*tsk
= current
;
820 fpvalid
= !!used_math();
822 fpvalid
= !fpregs_get(tsk
, NULL
,
823 0, sizeof(struct user_i387_ia32_struct
),
828 EXPORT_SYMBOL(dump_fpu
);
830 #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */