IPVS: convert __ip_vs_securetcp_lock to a spinlock
[linux-2.6/btrfs-unstable.git] / arch / x86 / kernel / i387.c
blob1f11f5ce668f93aadff67c425fae359da6efc6a8
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
8 #include <linux/module.h>
9 #include <linux/regset.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
13 #include <asm/sigcontext.h>
14 #include <asm/processor.h>
15 #include <asm/math_emu.h>
16 #include <asm/uaccess.h>
17 #include <asm/ptrace.h>
18 #include <asm/i387.h>
19 #include <asm/user.h>
#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
#else
/*
 * On 32-bit kernels the "ia32" signal-frame helpers and types are just
 * the native ones, so alias the ia32 names to the native definitions.
 */
# define save_i387_xstate_ia32		save_i387_xstate
# define restore_i387_xstate_ia32	restore_i387_xstate
# define _fpstate_ia32			_fpstate
# define _xstate_ia32			_xstate
# define sig_xstate_ia32_size		sig_xstate_size
# define fx_sw_reserved_ia32		fx_sw_reserved
# define user_i387_ia32_struct		user_i387_struct
# define user32_fxsr_struct		user_fxsr_struct
#endif
35 #ifdef CONFIG_MATH_EMULATION
36 # define HAVE_HWFP (boot_cpu_data.hard_math)
37 #else
38 # define HAVE_HWFP 1
39 #endif
41 static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
42 unsigned int xstate_size;
43 unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
44 static struct i387_fxsave_struct fx_scratch __cpuinitdata;
46 void __cpuinit mxcsr_feature_mask_init(void)
48 unsigned long mask = 0;
50 clts();
51 if (cpu_has_fxsr) {
52 memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
53 asm volatile("fxsave %0" : : "m" (fx_scratch));
54 mask = fx_scratch.mxcsr_mask;
55 if (mask == 0)
56 mask = 0x0000ffbf;
58 mxcsr_feature_mask &= mask;
59 stts();
62 static void __cpuinit init_thread_xstate(void)
65 * Note that xstate_size might be overwriten later during
66 * xsave_init().
69 if (!HAVE_HWFP) {
70 xstate_size = sizeof(struct i387_soft_struct);
71 return;
74 if (cpu_has_fxsr)
75 xstate_size = sizeof(struct i387_fxsave_struct);
76 #ifdef CONFIG_X86_32
77 else
78 xstate_size = sizeof(struct i387_fsave_struct);
79 #endif
82 #ifdef CONFIG_X86_64
84 * Called at bootup to set up the initial FPU state that is later cloned
85 * into all processes.
88 void __cpuinit fpu_init(void)
90 unsigned long oldcr0 = read_cr0();
92 set_in_cr4(X86_CR4_OSFXSR);
93 set_in_cr4(X86_CR4_OSXMMEXCPT);
95 write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */
97 if (!smp_processor_id())
98 init_thread_xstate();
100 mxcsr_feature_mask_init();
101 /* clean state in init */
102 current_thread_info()->status = 0;
103 clear_used_math();
106 #else /* CONFIG_X86_64 */
108 void __cpuinit fpu_init(void)
110 if (!smp_processor_id())
111 init_thread_xstate();
114 #endif /* CONFIG_X86_32 */
116 void fpu_finit(struct fpu *fpu)
118 #ifdef CONFIG_X86_32
119 if (!HAVE_HWFP) {
120 finit_soft_fpu(&fpu->state->soft);
121 return;
123 #endif
125 if (cpu_has_fxsr) {
126 struct i387_fxsave_struct *fx = &fpu->state->fxsave;
128 memset(fx, 0, xstate_size);
129 fx->cwd = 0x37f;
130 if (cpu_has_xmm)
131 fx->mxcsr = MXCSR_DEFAULT;
132 } else {
133 struct i387_fsave_struct *fp = &fpu->state->fsave;
134 memset(fp, 0, xstate_size);
135 fp->cwd = 0xffff037fu;
136 fp->swd = 0xffff0000u;
137 fp->twd = 0xffffffffu;
138 fp->fos = 0xffff0000u;
141 EXPORT_SYMBOL_GPL(fpu_finit);
144 * The _current_ task is using the FPU for the first time
145 * so initialize it and set the mxcsr to its default
146 * value at reset if we support XMM instructions and then
147 * remeber the current task has used the FPU.
149 int init_fpu(struct task_struct *tsk)
151 int ret;
153 if (tsk_used_math(tsk)) {
154 if (HAVE_HWFP && tsk == current)
155 unlazy_fpu(tsk);
156 return 0;
160 * Memory allocation at the first usage of the FPU and other state.
162 ret = fpu_alloc(&tsk->thread.fpu);
163 if (ret)
164 return ret;
166 fpu_finit(&tsk->thread.fpu);
168 set_stopped_child_used_math(tsk);
169 return 0;
173 * The xstateregs_active() routine is the same as the fpregs_active() routine,
174 * as the "regset->n" for the xstate regset will be updated based on the feature
175 * capabilites supported by the xsave.
177 int fpregs_active(struct task_struct *target, const struct user_regset *regset)
179 return tsk_used_math(target) ? regset->n : 0;
182 int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
184 return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
187 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
188 unsigned int pos, unsigned int count,
189 void *kbuf, void __user *ubuf)
191 int ret;
193 if (!cpu_has_fxsr)
194 return -ENODEV;
196 ret = init_fpu(target);
197 if (ret)
198 return ret;
200 sanitize_i387_state(target);
202 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
203 &target->thread.fpu.state->fxsave, 0, -1);
206 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
207 unsigned int pos, unsigned int count,
208 const void *kbuf, const void __user *ubuf)
210 int ret;
212 if (!cpu_has_fxsr)
213 return -ENODEV;
215 ret = init_fpu(target);
216 if (ret)
217 return ret;
219 sanitize_i387_state(target);
221 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
222 &target->thread.fpu.state->fxsave, 0, -1);
225 * mxcsr reserved bits must be masked to zero for security reasons.
227 target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
230 * update the header bits in the xsave header, indicating the
231 * presence of FP and SSE state.
233 if (cpu_has_xsave)
234 target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
236 return ret;
239 int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
240 unsigned int pos, unsigned int count,
241 void *kbuf, void __user *ubuf)
243 int ret;
245 if (!cpu_has_xsave)
246 return -ENODEV;
248 ret = init_fpu(target);
249 if (ret)
250 return ret;
253 * Copy the 48bytes defined by the software first into the xstate
254 * memory layout in the thread struct, so that we can copy the entire
255 * xstateregs to the user using one user_regset_copyout().
257 memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
258 xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
261 * Copy the xstate memory layout.
263 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
264 &target->thread.fpu.state->xsave, 0, -1);
265 return ret;
268 int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
269 unsigned int pos, unsigned int count,
270 const void *kbuf, const void __user *ubuf)
272 int ret;
273 struct xsave_hdr_struct *xsave_hdr;
275 if (!cpu_has_xsave)
276 return -ENODEV;
278 ret = init_fpu(target);
279 if (ret)
280 return ret;
282 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
283 &target->thread.fpu.state->xsave, 0, -1);
286 * mxcsr reserved bits must be masked to zero for security reasons.
288 target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
290 xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;
292 xsave_hdr->xstate_bv &= pcntxt_mask;
294 * These bits must be zero.
296 xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;
298 return ret;
301 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
 * FPU tag word conversions.
 *
 * Compress a legacy i387 tag word (2 bits per register: 00 valid,
 * 01 zero, 10 special, 11 empty) into the FXSR abridged form
 * (1 bit per register: 1 = not empty, 0 = empty).
 */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int bits; /* to avoid 16 bit prefixes in the code */

	/* Each 2-bit pair becomes 01 when not empty (i.e. != 11), else 00 */
	bits = ~twd;
	bits = (bits | (bits >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* then gather the eight valid bits into the low byte. */
	bits = (bits | (bits >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	bits = (bits | (bits >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	bits = (bits | (bits >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return bits;
}
322 #define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16);
323 #define FP_EXP_TAG_VALID 0
324 #define FP_EXP_TAG_ZERO 1
325 #define FP_EXP_TAG_SPECIAL 2
326 #define FP_EXP_TAG_EMPTY 3
328 static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
330 struct _fpxreg *st;
331 u32 tos = (fxsave->swd >> 11) & 7;
332 u32 twd = (unsigned long) fxsave->twd;
333 u32 tag;
334 u32 ret = 0xffff0000u;
335 int i;
337 for (i = 0; i < 8; i++, twd >>= 1) {
338 if (twd & 0x1) {
339 st = FPREG_ADDR(fxsave, (i - tos) & 7);
341 switch (st->exponent & 0x7fff) {
342 case 0x7fff:
343 tag = FP_EXP_TAG_SPECIAL;
344 break;
345 case 0x0000:
346 if (!st->significand[0] &&
347 !st->significand[1] &&
348 !st->significand[2] &&
349 !st->significand[3])
350 tag = FP_EXP_TAG_ZERO;
351 else
352 tag = FP_EXP_TAG_SPECIAL;
353 break;
354 default:
355 if (st->significand[3] & 0x8000)
356 tag = FP_EXP_TAG_VALID;
357 else
358 tag = FP_EXP_TAG_SPECIAL;
359 break;
361 } else {
362 tag = FP_EXP_TAG_EMPTY;
364 ret |= tag << (2 * i);
366 return ret;
370 * FXSR floating point environment conversions.
373 static void
374 convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
376 struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
377 struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
378 struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
379 int i;
381 env->cwd = fxsave->cwd | 0xffff0000u;
382 env->swd = fxsave->swd | 0xffff0000u;
383 env->twd = twd_fxsr_to_i387(fxsave);
385 #ifdef CONFIG_X86_64
386 env->fip = fxsave->rip;
387 env->foo = fxsave->rdp;
388 if (tsk == current) {
390 * should be actually ds/cs at fpu exception time, but
391 * that information is not available in 64bit mode.
393 asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos));
394 asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs));
395 } else {
396 struct pt_regs *regs = task_pt_regs(tsk);
398 env->fos = 0xffff0000 | tsk->thread.ds;
399 env->fcs = regs->cs;
401 #else
402 env->fip = fxsave->fip;
403 env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
404 env->foo = fxsave->foo;
405 env->fos = fxsave->fos;
406 #endif
408 for (i = 0; i < 8; ++i)
409 memcpy(&to[i], &from[i], sizeof(to[0]));
412 static void convert_to_fxsr(struct task_struct *tsk,
413 const struct user_i387_ia32_struct *env)
416 struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
417 struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
418 struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
419 int i;
421 fxsave->cwd = env->cwd;
422 fxsave->swd = env->swd;
423 fxsave->twd = twd_i387_to_fxsr(env->twd);
424 fxsave->fop = (u16) ((u32) env->fcs >> 16);
425 #ifdef CONFIG_X86_64
426 fxsave->rip = env->fip;
427 fxsave->rdp = env->foo;
428 /* cs and ds ignored */
429 #else
430 fxsave->fip = env->fip;
431 fxsave->fcs = (env->fcs & 0xffff);
432 fxsave->foo = env->foo;
433 fxsave->fos = env->fos;
434 #endif
436 for (i = 0; i < 8; ++i)
437 memcpy(&to[i], &from[i], sizeof(from[0]));
440 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
441 unsigned int pos, unsigned int count,
442 void *kbuf, void __user *ubuf)
444 struct user_i387_ia32_struct env;
445 int ret;
447 ret = init_fpu(target);
448 if (ret)
449 return ret;
451 if (!HAVE_HWFP)
452 return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
454 if (!cpu_has_fxsr) {
455 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
456 &target->thread.fpu.state->fsave, 0,
457 -1);
460 sanitize_i387_state(target);
462 if (kbuf && pos == 0 && count == sizeof(env)) {
463 convert_from_fxsr(kbuf, target);
464 return 0;
467 convert_from_fxsr(&env, target);
469 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
472 int fpregs_set(struct task_struct *target, const struct user_regset *regset,
473 unsigned int pos, unsigned int count,
474 const void *kbuf, const void __user *ubuf)
476 struct user_i387_ia32_struct env;
477 int ret;
479 ret = init_fpu(target);
480 if (ret)
481 return ret;
483 sanitize_i387_state(target);
485 if (!HAVE_HWFP)
486 return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
488 if (!cpu_has_fxsr) {
489 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
490 &target->thread.fpu.state->fsave, 0, -1);
493 if (pos > 0 || count < sizeof(env))
494 convert_from_fxsr(&env, target);
496 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
497 if (!ret)
498 convert_to_fxsr(target, &env);
501 * update the header bit in the xsave header, indicating the
502 * presence of FP.
504 if (cpu_has_xsave)
505 target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
506 return ret;
510 * Signal frame handlers.
513 static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
515 struct task_struct *tsk = current;
516 struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;
518 fp->status = fp->swd;
519 if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
520 return -1;
521 return 1;
524 static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
526 struct task_struct *tsk = current;
527 struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
528 struct user_i387_ia32_struct env;
529 int err = 0;
531 convert_from_fxsr(&env, tsk);
532 if (__copy_to_user(buf, &env, sizeof(env)))
533 return -1;
535 err |= __put_user(fx->swd, &buf->status);
536 err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
537 if (err)
538 return -1;
540 if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size))
541 return -1;
542 return 1;
545 static int save_i387_xsave(void __user *buf)
547 struct task_struct *tsk = current;
548 struct _fpstate_ia32 __user *fx = buf;
549 int err = 0;
552 sanitize_i387_state(tsk);
555 * For legacy compatible, we always set FP/SSE bits in the bit
556 * vector while saving the state to the user context.
557 * This will enable us capturing any changes(during sigreturn) to
558 * the FP/SSE bits by the legacy applications which don't touch
559 * xstate_bv in the xsave header.
561 * xsave aware applications can change the xstate_bv in the xsave
562 * header as well as change any contents in the memory layout.
563 * xrestore as part of sigreturn will capture all the changes.
565 tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
567 if (save_i387_fxsave(fx) < 0)
568 return -1;
570 err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32,
571 sizeof(struct _fpx_sw_bytes));
572 err |= __put_user(FP_XSTATE_MAGIC2,
573 (__u32 __user *) (buf + sig_xstate_ia32_size
574 - FP_XSTATE_MAGIC2_SIZE));
575 if (err)
576 return -1;
578 return 1;
581 int save_i387_xstate_ia32(void __user *buf)
583 struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
584 struct task_struct *tsk = current;
586 if (!used_math())
587 return 0;
589 if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size))
590 return -EACCES;
592 * This will cause a "finit" to be triggered by the next
593 * attempted FPU operation by the 'current' process.
595 clear_used_math();
597 if (!HAVE_HWFP) {
598 return fpregs_soft_get(current, NULL,
599 0, sizeof(struct user_i387_ia32_struct),
600 NULL, fp) ? -1 : 1;
603 unlazy_fpu(tsk);
605 if (cpu_has_xsave)
606 return save_i387_xsave(fp);
607 if (cpu_has_fxsr)
608 return save_i387_fxsave(fp);
609 else
610 return save_i387_fsave(fp);
613 static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
615 struct task_struct *tsk = current;
617 return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
618 sizeof(struct i387_fsave_struct));
621 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
622 unsigned int size)
624 struct task_struct *tsk = current;
625 struct user_i387_ia32_struct env;
626 int err;
628 err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0],
629 size);
630 /* mxcsr reserved bits must be masked to zero for security reasons */
631 tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
632 if (err || __copy_from_user(&env, buf, sizeof(env)))
633 return 1;
634 convert_to_fxsr(tsk, &env);
636 return 0;
639 static int restore_i387_xsave(void __user *buf)
641 struct _fpx_sw_bytes fx_sw_user;
642 struct _fpstate_ia32 __user *fx_user =
643 ((struct _fpstate_ia32 __user *) buf);
644 struct i387_fxsave_struct __user *fx =
645 (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
646 struct xsave_hdr_struct *xsave_hdr =
647 &current->thread.fpu.state->xsave.xsave_hdr;
648 u64 mask;
649 int err;
651 if (check_for_xstate(fx, buf, &fx_sw_user))
652 goto fx_only;
654 mask = fx_sw_user.xstate_bv;
656 err = restore_i387_fxsave(buf, fx_sw_user.xstate_size);
658 xsave_hdr->xstate_bv &= pcntxt_mask;
660 * These bits must be zero.
662 xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;
665 * Init the state that is not present in the memory layout
666 * and enabled by the OS.
668 mask = ~(pcntxt_mask & ~mask);
669 xsave_hdr->xstate_bv &= mask;
671 return err;
672 fx_only:
674 * Couldn't find the extended state information in the memory
675 * layout. Restore the FP/SSE and init the other extended state
676 * enabled by the OS.
678 xsave_hdr->xstate_bv = XSTATE_FPSSE;
679 return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct));
682 int restore_i387_xstate_ia32(void __user *buf)
684 int err;
685 struct task_struct *tsk = current;
686 struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
688 if (HAVE_HWFP)
689 clear_fpu(tsk);
691 if (!buf) {
692 if (used_math()) {
693 clear_fpu(tsk);
694 clear_used_math();
697 return 0;
698 } else
699 if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size))
700 return -EACCES;
702 if (!used_math()) {
703 err = init_fpu(tsk);
704 if (err)
705 return err;
708 if (HAVE_HWFP) {
709 if (cpu_has_xsave)
710 err = restore_i387_xsave(buf);
711 else if (cpu_has_fxsr)
712 err = restore_i387_fxsave(fp, sizeof(struct
713 i387_fxsave_struct));
714 else
715 err = restore_i387_fsave(fp);
716 } else {
717 err = fpregs_soft_set(current, NULL,
718 0, sizeof(struct user_i387_ia32_struct),
719 NULL, fp) != 0;
721 set_used_math();
723 return err;
727 * FPU state for core dumps.
728 * This is only used for a.out dumps now.
729 * It is declared generically using elf_fpregset_t (which is
730 * struct user_i387_struct) but is in fact only used for 32-bit
731 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
733 int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
735 struct task_struct *tsk = current;
736 int fpvalid;
738 fpvalid = !!used_math();
739 if (fpvalid)
740 fpvalid = !fpregs_get(tsk, NULL,
741 0, sizeof(struct user_i387_ia32_struct),
742 fpu, NULL);
744 return fpvalid;
746 EXPORT_SYMBOL(dump_fpu);
748 #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */