include/asm-x86/i387.h
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>

extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern void init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set;

#ifdef CONFIG_IA32_EMULATION
struct _fpstate_ia32;
extern int save_i387_ia32(struct _fpstate_ia32 __user *buf);
extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf);
#endif

#ifdef CONFIG_X86_64

/* Ignore delayed exceptions from user space */
static inline void tolerant_fwait(void)
{
        asm volatile("1: fwait\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b));
}

static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
{
        int err;

        asm volatile("1: rex64/fxrstor (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
#if 0 /* See comment in __save_init_fpu() below. */
                     : [fx] "r" (fx), "m" (*fx), "0" (0));
#else
                     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
#endif
        if (unlikely(err))
                init_fpu(current);
        return err;
}

#define X87_FSW_ES (1 << 7)     /* Exception Summary */

/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
   is pending. Clear the x87 state here by setting it to fixed
   values. The kernel data segment can be sometimes 0 and sometimes
   new user value. Both should be ok.
   Use the PDA as safe address because it should be already in L1. */
static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
{
        if (unlikely(fx->swd & X87_FSW_ES))
                asm volatile("fnclex");
        alternative_input(ASM_NOP8 ASM_NOP2,
                          " emms\n"             /* clear stack tags */
                          " fildl %%gs:0",      /* load to clear state */
                          X86_FEATURE_FXSAVE_LEAK);
}

static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
{
        int err;

        asm volatile("1: rex64/fxsave (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in __fxsave_clear() below. */
                     : [fx] "r" (fx), "0" (0));
#else
                     : [fx] "cdaSDb" (fx), "0" (0));
#endif
        if (unlikely(err) &&
            __clear_user(fx, sizeof(struct i387_fxsave_struct)))
                err = -EFAULT;
        /* No need to clear here because the caller clears USED_MATH */
        return err;
}

static inline void __save_init_fpu(struct task_struct *tsk)
{
        /* Using "rex64; fxsave %0" is broken because, if the memory operand
           uses any extended registers for addressing, a second REX prefix
           will be generated (to the assembler, rex64 followed by semicolon
           is a separate instruction), and hence the 64-bitness is lost. */
#if 0
        /* Using "fxsaveq %0" would be the ideal choice, but is only supported
           starting with gas 2.16. */
        __asm__ __volatile__("fxsaveq %0"
                             : "=m" (tsk->thread.i387.fxsave));
#elif 0
        /* Using, as a workaround, the properly prefixed form below isn't
           accepted by any binutils version so far released, complaining that
           the same type of prefix is used twice if an extended register is
           needed for addressing (fix submitted to mainline 2005-11-21). */
        __asm__ __volatile__("rex64/fxsave %0"
                             : "=m" (tsk->thread.i387.fxsave));
#else
        /* This, however, we can work around by forcing the compiler to select
           an addressing mode that doesn't require extended registers. */
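        /* The "cdaSDb" constraint limits the base register to one of the
           legacy registers (%rcx, %rdx, %rax, %rsi, %rdi or %rbx), so the
           address itself never needs a REX prefix. */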
        __asm__ __volatile__("rex64/fxsave %P2(%1)"
                             : "=m" (tsk->thread.i387.fxsave)
                             : "cdaSDb" (tsk),
                               "i" (offsetof(__typeof__(*tsk),
                                             thread.i387.fxsave)));
#endif
        clear_fpu_state(&tsk->thread.i387.fxsave);
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

/*
 * Signal frame handlers.
 */
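/*
 * save_i387() returns 0 if the task has no FPU state, 1 once the state
 * has been written to the user-space signal frame, and a negative value
 * on failure.
 */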

static inline int save_i387(struct _fpstate __user *buf)
{
        struct task_struct *tsk = current;
        int err = 0;

        BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
                     sizeof(tsk->thread.i387.fxsave));

        if ((unsigned long)buf % 16)
                printk("save_i387: bad fpstate %p\n", buf);

        if (!used_math())
                return 0;
        clear_used_math(); /* trigger finit */
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                err = save_i387_checking((struct i387_fxsave_struct __user *)
                                         buf);
                if (err)
                        return err;
                task_thread_info(tsk)->status &= ~TS_USEDFPU;
                stts();
        } else {
                if (__copy_to_user(buf, &tsk->thread.i387.fxsave,
                                   sizeof(struct i387_fxsave_struct)))
                        return -1;
        }
        return 1;
}

/*
 * This restores directly out of user space. Exceptions are handled.
 */
static inline int restore_i387(struct _fpstate __user *buf)
{
        set_used_math();
        if (!(task_thread_info(current)->status & TS_USEDFPU)) {
                clts();
                task_thread_info(current)->status |= TS_USEDFPU;
        }
        return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
}

#else  /* CONFIG_X86_32 */

static inline void tolerant_fwait(void)
{
        asm volatile("fnclex ; fwait");
}

static inline void restore_fpu(struct task_struct *tsk)
{
        /*
         * The "nop" is needed to make the instructions the same
         * length.
         */
        alternative_input(
                "nop ; frstor %1",
                "fxrstor %1",
                X86_FEATURE_FXSR,
                "m" ((tsk)->thread.i387.fxsave));
}

/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */
static inline void __save_init_fpu(struct task_struct *tsk)
{
        /* Use more nops than strictly needed in case the compiler
           varies code */
        alternative_input(
                "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
                "fxsave %[fx]\n"
                "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
                X86_FEATURE_FXSR,
                [fx] "m" (tsk->thread.i387.fxsave),
                [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory");
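        /* The bt/fnclex pair above checks bit 7 (ES, exception summary) of
           the status word just written by fxsave and clears any pending
           exception so it cannot be delivered again later. */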
        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
           is pending. Clear the x87 state here by setting it to fixed
           values. safe_address is a random variable that should be in L1 */
        alternative_input(
                GENERIC_NOP8 GENERIC_NOP2,
                "emms\n\t"              /* clear stack tags */
                "fildl %[addr]",        /* set F?P to defined value */
                X86_FEATURE_FXSAVE_LEAK,
                [addr] "m" (safe_address));
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

/*
 * Signal frame handlers...
 */
extern int save_i387(struct _fpstate __user *buf);
extern int restore_i387(struct _fpstate __user *buf);

#endif  /* CONFIG_X86_64 */
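
/*
 * Lazy FPU switching: while TS_USEDFPU is set, the task's FPU state lives
 * in the hardware registers rather than in thread.i387.  __unlazy_fpu()
 * writes it back and sets CR0.TS so the next FPU instruction traps into
 * math_state_restore().  fpu_counter counts consecutive context switches
 * in which the task used the FPU; it is reset here when the FPU was idle.
 */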
static inline void __unlazy_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                __save_init_fpu(tsk);
                stts();
        } else
                tsk->fpu_counter = 0;
}

static inline void __clear_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                tolerant_fwait();
                task_thread_info(tsk)->status &= ~TS_USEDFPU;
                stts();
        }
}

static inline void kernel_fpu_begin(void)
{
        struct thread_info *me = current_thread_info();
        preempt_disable();
        if (me->status & TS_USEDFPU)
                __save_init_fpu(me->task);
        else
                clts();
}

static inline void kernel_fpu_end(void)
{
        stts();
        preempt_enable();
}
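
/*
 * Sketch of typical use: bracket any in-kernel FPU/SSE code so that the
 * user's state is saved first and CR0.TS is set again afterwards.
 * Preemption stays disabled in between, so the protected region must not
 * sleep:
 *
 *      kernel_fpu_begin();
 *      ... MMX/SSE instructions ...
 *      kernel_fpu_end();
 */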

#ifdef CONFIG_X86_64

static inline void save_init_fpu(struct task_struct *tsk)
{
        __save_init_fpu(tsk);
        stts();
}

#define unlazy_fpu      __unlazy_fpu
#define clear_fpu       __clear_fpu

#else  /* CONFIG_X86_32 */

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __save_init_fpu(tsk);
        stts();
        preempt_enable();
}

static inline void unlazy_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __unlazy_fpu(tsk);
        preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __clear_fpu(tsk);
        preempt_enable();
}

#endif  /* CONFIG_X86_64 */

/*
 * i387 state interaction
 */
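/*
 * These helpers read the saved in-memory copy in thread.i387, not the
 * live registers; the copy is only current once the save/unlazy helpers
 * above have run.
 */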
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.i387.fxsave.cwd;
        } else {
                return (unsigned short)tsk->thread.i387.fsave.cwd;
        }
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.i387.fxsave.swd;
        } else {
                return (unsigned short)tsk->thread.i387.fsave.swd;
        }
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
        if (cpu_has_xmm) {
                return tsk->thread.i387.fxsave.mxcsr;
        } else {
                return MXCSR_DEFAULT;
        }
}

#endif  /* _ASM_X86_I387_H */