/*
 *  linux/arch/i386/kernel/i387.c
 *
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
11 #include <linux/config.h>
12 #include <linux/sched.h>
13 #include <linux/module.h>
14 #include <asm/processor.h>
16 #include <asm/math_emu.h>
17 #include <asm/sigcontext.h>
19 #include <asm/ptrace.h>
20 #include <asm/uaccess.h>
/*
 * HAVE_HWFP: with math emulation configured, the presence of hardware
 * floating point is decided at runtime from the CPU capability bit;
 * otherwise hardware FP is assumed unconditionally.
 */
#ifdef CONFIG_MATH_EMULATION
#define HAVE_HWFP (boot_cpu_data.hard_math)
#else
#define HAVE_HWFP 1
#endif

/*
 * Mask of MXCSR bits this CPU actually implements.  Narrowed at boot
 * by mxcsr_feature_mask_init(); applied to any MXCSR value coming in
 * from user space, since reserved bits must be masked to zero.
 */
static unsigned long mxcsr_feature_mask = 0xffffffff;
30 void mxcsr_feature_mask_init(void)
32 unsigned long mask
= 0;
35 memset(¤t
->thread
.i387
.fxsave
, 0, sizeof(struct i387_fxsave_struct
));
36 asm volatile("fxsave %0" : : "m" (current
->thread
.i387
.fxsave
));
37 mask
= current
->thread
.i387
.fxsave
.mxcsr_mask
;
38 if (mask
== 0) mask
= 0x0000ffbf;
40 mxcsr_feature_mask
&= mask
;
45 * The _current_ task is using the FPU for the first time
46 * so initialize it and set the mxcsr to its default
47 * value at reset if we support XMM instructions and then
48 * remeber the current task has used the FPU.
50 void init_fpu(struct task_struct
*tsk
)
53 memset(&tsk
->thread
.i387
.fxsave
, 0, sizeof(struct i387_fxsave_struct
));
54 tsk
->thread
.i387
.fxsave
.cwd
= 0x37f;
56 tsk
->thread
.i387
.fxsave
.mxcsr
= 0x1f80;
58 memset(&tsk
->thread
.i387
.fsave
, 0, sizeof(struct i387_fsave_struct
));
59 tsk
->thread
.i387
.fsave
.cwd
= 0xffff037fu
;
60 tsk
->thread
.i387
.fsave
.swd
= 0xffff0000u
;
61 tsk
->thread
.i387
.fsave
.twd
= 0xffffffffu
;
62 tsk
->thread
.i387
.fsave
.fos
= 0xffff0000u
;
64 /* only the device not available exception or ptrace can call init_fpu */
65 set_stopped_child_used_math(tsk
);
69 * FPU lazy state save handling.
72 void kernel_fpu_begin(void)
74 struct thread_info
*thread
= current_thread_info();
77 if (thread
->status
& TS_USEDFPU
) {
78 __save_init_fpu(thread
->task
);
83 EXPORT_SYMBOL_GPL(kernel_fpu_begin
);
/*
 * FPU tag word conversions.
 *
 * Convert an i387-style tag word (2 bits per register: 00 valid,
 * 01 zero, 10 special, 11 empty) into the compressed FXSR form
 * (1 bit per register: 1 = in use, 0 = empty), packed into the
 * low byte of the result.
 */
static inline unsigned short twd_i387_to_fxsr( unsigned short twd )
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;	/* 11 (empty) -> 00, every other tag -> nonzero pair */
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
	return tmp;
}
103 static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct
*fxsave
)
105 struct _fpxreg
*st
= NULL
;
106 unsigned long tos
= (fxsave
->swd
>> 11) & 7;
107 unsigned long twd
= (unsigned long) fxsave
->twd
;
109 unsigned long ret
= 0xffff0000u
;
112 #define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16);
114 for ( i
= 0 ; i
< 8 ; i
++ ) {
116 st
= FPREG_ADDR( fxsave
, (i
- tos
) & 7 );
118 switch ( st
->exponent
& 0x7fff ) {
120 tag
= 2; /* Special */
123 if ( !st
->significand
[0] &&
124 !st
->significand
[1] &&
125 !st
->significand
[2] &&
126 !st
->significand
[3] ) {
129 tag
= 2; /* Special */
133 if ( st
->significand
[3] & 0x8000 ) {
136 tag
= 2; /* Special */
143 ret
|= (tag
<< (2 * i
));
150 * FPU state interaction.
153 unsigned short get_fpu_cwd( struct task_struct
*tsk
)
155 if ( cpu_has_fxsr
) {
156 return tsk
->thread
.i387
.fxsave
.cwd
;
158 return (unsigned short)tsk
->thread
.i387
.fsave
.cwd
;
162 unsigned short get_fpu_swd( struct task_struct
*tsk
)
164 if ( cpu_has_fxsr
) {
165 return tsk
->thread
.i387
.fxsave
.swd
;
167 return (unsigned short)tsk
->thread
.i387
.fsave
.swd
;
172 unsigned short get_fpu_twd( struct task_struct
*tsk
)
174 if ( cpu_has_fxsr
) {
175 return tsk
->thread
.i387
.fxsave
.twd
;
177 return (unsigned short)tsk
->thread
.i387
.fsave
.twd
;
182 unsigned short get_fpu_mxcsr( struct task_struct
*tsk
)
185 return tsk
->thread
.i387
.fxsave
.mxcsr
;
193 void set_fpu_cwd( struct task_struct
*tsk
, unsigned short cwd
)
195 if ( cpu_has_fxsr
) {
196 tsk
->thread
.i387
.fxsave
.cwd
= cwd
;
198 tsk
->thread
.i387
.fsave
.cwd
= ((long)cwd
| 0xffff0000u
);
202 void set_fpu_swd( struct task_struct
*tsk
, unsigned short swd
)
204 if ( cpu_has_fxsr
) {
205 tsk
->thread
.i387
.fxsave
.swd
= swd
;
207 tsk
->thread
.i387
.fsave
.swd
= ((long)swd
| 0xffff0000u
);
211 void set_fpu_twd( struct task_struct
*tsk
, unsigned short twd
)
213 if ( cpu_has_fxsr
) {
214 tsk
->thread
.i387
.fxsave
.twd
= twd_i387_to_fxsr(twd
);
216 tsk
->thread
.i387
.fsave
.twd
= ((long)twd
| 0xffff0000u
);
223 * FXSR floating point environment conversions.
226 static int convert_fxsr_to_user( struct _fpstate __user
*buf
,
227 struct i387_fxsave_struct
*fxsave
)
229 unsigned long env
[7];
230 struct _fpreg __user
*to
;
231 struct _fpxreg
*from
;
234 env
[0] = (unsigned long)fxsave
->cwd
| 0xffff0000ul
;
235 env
[1] = (unsigned long)fxsave
->swd
| 0xffff0000ul
;
236 env
[2] = twd_fxsr_to_i387(fxsave
);
237 env
[3] = fxsave
->fip
;
238 env
[4] = fxsave
->fcs
| ((unsigned long)fxsave
->fop
<< 16);
239 env
[5] = fxsave
->foo
;
240 env
[6] = fxsave
->fos
;
242 if ( __copy_to_user( buf
, env
, 7 * sizeof(unsigned long) ) )
246 from
= (struct _fpxreg
*) &fxsave
->st_space
[0];
247 for ( i
= 0 ; i
< 8 ; i
++, to
++, from
++ ) {
248 unsigned long __user
*t
= (unsigned long __user
*)to
;
249 unsigned long *f
= (unsigned long *)from
;
251 if (__put_user(*f
, t
) ||
252 __put_user(*(f
+ 1), t
+ 1) ||
253 __put_user(from
->exponent
, &to
->exponent
))
259 static int convert_fxsr_from_user( struct i387_fxsave_struct
*fxsave
,
260 struct _fpstate __user
*buf
)
262 unsigned long env
[7];
264 struct _fpreg __user
*from
;
267 if ( __copy_from_user( env
, buf
, 7 * sizeof(long) ) )
270 fxsave
->cwd
= (unsigned short)(env
[0] & 0xffff);
271 fxsave
->swd
= (unsigned short)(env
[1] & 0xffff);
272 fxsave
->twd
= twd_i387_to_fxsr((unsigned short)(env
[2] & 0xffff));
273 fxsave
->fip
= env
[3];
274 fxsave
->fop
= (unsigned short)((env
[4] & 0xffff0000ul
) >> 16);
275 fxsave
->fcs
= (env
[4] & 0xffff);
276 fxsave
->foo
= env
[5];
277 fxsave
->fos
= env
[6];
279 to
= (struct _fpxreg
*) &fxsave
->st_space
[0];
281 for ( i
= 0 ; i
< 8 ; i
++, to
++, from
++ ) {
282 unsigned long *t
= (unsigned long *)to
;
283 unsigned long __user
*f
= (unsigned long __user
*)from
;
285 if (__get_user(*t
, f
) ||
286 __get_user(*(t
+ 1), f
+ 1) ||
287 __get_user(to
->exponent
, &from
->exponent
))
294 * Signal frame handlers.
297 static inline int save_i387_fsave( struct _fpstate __user
*buf
)
299 struct task_struct
*tsk
= current
;
302 tsk
->thread
.i387
.fsave
.status
= tsk
->thread
.i387
.fsave
.swd
;
303 if ( __copy_to_user( buf
, &tsk
->thread
.i387
.fsave
,
304 sizeof(struct i387_fsave_struct
) ) )
309 static int save_i387_fxsave( struct _fpstate __user
*buf
)
311 struct task_struct
*tsk
= current
;
316 if ( convert_fxsr_to_user( buf
, &tsk
->thread
.i387
.fxsave
) )
319 err
|= __put_user( tsk
->thread
.i387
.fxsave
.swd
, &buf
->status
);
320 err
|= __put_user( X86_FXSR_MAGIC
, &buf
->magic
);
324 if ( __copy_to_user( &buf
->_fxsr_env
[0], &tsk
->thread
.i387
.fxsave
,
325 sizeof(struct i387_fxsave_struct
) ) )
330 int save_i387( struct _fpstate __user
*buf
)
335 /* This will cause a "finit" to be triggered by the next
336 * attempted FPU operation by the 'current' process.
341 if ( cpu_has_fxsr
) {
342 return save_i387_fxsave( buf
);
344 return save_i387_fsave( buf
);
347 return save_i387_soft( ¤t
->thread
.i387
.soft
, buf
);
351 static inline int restore_i387_fsave( struct _fpstate __user
*buf
)
353 struct task_struct
*tsk
= current
;
355 return __copy_from_user( &tsk
->thread
.i387
.fsave
, buf
,
356 sizeof(struct i387_fsave_struct
) );
359 static int restore_i387_fxsave( struct _fpstate __user
*buf
)
362 struct task_struct
*tsk
= current
;
364 err
= __copy_from_user( &tsk
->thread
.i387
.fxsave
, &buf
->_fxsr_env
[0],
365 sizeof(struct i387_fxsave_struct
) );
366 /* mxcsr reserved bits must be masked to zero for security reasons */
367 tsk
->thread
.i387
.fxsave
.mxcsr
&= mxcsr_feature_mask
;
368 return err
? 1 : convert_fxsr_from_user( &tsk
->thread
.i387
.fxsave
, buf
);
371 int restore_i387( struct _fpstate __user
*buf
)
376 if ( cpu_has_fxsr
) {
377 err
= restore_i387_fxsave( buf
);
379 err
= restore_i387_fsave( buf
);
382 err
= restore_i387_soft( ¤t
->thread
.i387
.soft
, buf
);
389 * ptrace request handlers.
392 static inline int get_fpregs_fsave( struct user_i387_struct __user
*buf
,
393 struct task_struct
*tsk
)
395 return __copy_to_user( buf
, &tsk
->thread
.i387
.fsave
,
396 sizeof(struct user_i387_struct
) );
399 static inline int get_fpregs_fxsave( struct user_i387_struct __user
*buf
,
400 struct task_struct
*tsk
)
402 return convert_fxsr_to_user( (struct _fpstate __user
*)buf
,
403 &tsk
->thread
.i387
.fxsave
);
406 int get_fpregs( struct user_i387_struct __user
*buf
, struct task_struct
*tsk
)
409 if ( cpu_has_fxsr
) {
410 return get_fpregs_fxsave( buf
, tsk
);
412 return get_fpregs_fsave( buf
, tsk
);
415 return save_i387_soft( &tsk
->thread
.i387
.soft
,
416 (struct _fpstate __user
*)buf
);
420 static inline int set_fpregs_fsave( struct task_struct
*tsk
,
421 struct user_i387_struct __user
*buf
)
423 return __copy_from_user( &tsk
->thread
.i387
.fsave
, buf
,
424 sizeof(struct user_i387_struct
) );
427 static inline int set_fpregs_fxsave( struct task_struct
*tsk
,
428 struct user_i387_struct __user
*buf
)
430 return convert_fxsr_from_user( &tsk
->thread
.i387
.fxsave
,
431 (struct _fpstate __user
*)buf
);
434 int set_fpregs( struct task_struct
*tsk
, struct user_i387_struct __user
*buf
)
437 if ( cpu_has_fxsr
) {
438 return set_fpregs_fxsave( tsk
, buf
);
440 return set_fpregs_fsave( tsk
, buf
);
443 return restore_i387_soft( &tsk
->thread
.i387
.soft
,
444 (struct _fpstate __user
*)buf
);
448 int get_fpxregs( struct user_fxsr_struct __user
*buf
, struct task_struct
*tsk
)
450 if ( cpu_has_fxsr
) {
451 if (__copy_to_user( buf
, &tsk
->thread
.i387
.fxsave
,
452 sizeof(struct user_fxsr_struct
) ))
460 int set_fpxregs( struct task_struct
*tsk
, struct user_fxsr_struct __user
*buf
)
464 if ( cpu_has_fxsr
) {
465 if (__copy_from_user( &tsk
->thread
.i387
.fxsave
, buf
,
466 sizeof(struct user_fxsr_struct
) ))
468 /* mxcsr reserved bits must be masked to zero for security reasons */
469 tsk
->thread
.i387
.fxsave
.mxcsr
&= mxcsr_feature_mask
;
477 * FPU state for core dumps.
480 static inline void copy_fpu_fsave( struct task_struct
*tsk
,
481 struct user_i387_struct
*fpu
)
483 memcpy( fpu
, &tsk
->thread
.i387
.fsave
,
484 sizeof(struct user_i387_struct
) );
487 static inline void copy_fpu_fxsave( struct task_struct
*tsk
,
488 struct user_i387_struct
*fpu
)
491 unsigned short *from
;
494 memcpy( fpu
, &tsk
->thread
.i387
.fxsave
, 7 * sizeof(long) );
496 to
= (unsigned short *)&fpu
->st_space
[0];
497 from
= (unsigned short *)&tsk
->thread
.i387
.fxsave
.st_space
[0];
498 for ( i
= 0 ; i
< 8 ; i
++, to
+= 5, from
+= 8 ) {
499 memcpy( to
, from
, 5 * sizeof(unsigned short) );
503 int dump_fpu( struct pt_regs
*regs
, struct user_i387_struct
*fpu
)
506 struct task_struct
*tsk
= current
;
508 fpvalid
= !!used_math();
511 if ( cpu_has_fxsr
) {
512 copy_fpu_fxsave( tsk
, fpu
);
514 copy_fpu_fsave( tsk
, fpu
);
520 EXPORT_SYMBOL(dump_fpu
);
522 int dump_task_fpu(struct task_struct
*tsk
, struct user_i387_struct
*fpu
)
524 int fpvalid
= !!tsk_used_math(tsk
);
530 copy_fpu_fxsave(tsk
, fpu
);
532 copy_fpu_fsave(tsk
, fpu
);
537 int dump_task_extended_fpu(struct task_struct
*tsk
, struct user_fxsr_struct
*fpu
)
539 int fpvalid
= tsk_used_math(tsk
) && cpu_has_fxsr
;
544 memcpy(fpu
, &tsk
->thread
.i387
.fxsave
, sizeof(*fpu
));