/*
 * PowerPC floating point and SPE emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}
uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
uint32_t helper_compute_fprf(CPUPPCState *env, uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        ret = isneg ? 0x09 : 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            ret = isneg ? 0x12 : 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x14;
            } else {
                /* Normalized numbers */
                ret = 0x10;
            }
            if (isneg) {
                ret |= 0x08;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
/* Floating-point invalid operations exception */
static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op,
                                             int set_fpcc)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* The exception is deferred until the target FPR is updated */
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | op);
        }
    }
    return ret;
}
static inline void float_zero_divide_excp(CPUPPCState *env)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}
static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}
void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
void helper_float_check_status(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_divbyzero) {
        float_zero_divide_excp(env);
    } else if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(env, cs->exception_index,
                                       env->error_code);
        }
    }
}
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN addition */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN subtraction */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN multiplication */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) &&
                 float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN division */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
#define FPU_FCTI(op, cvt, nanval)                                           \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                        \
{                                                                           \
    CPU_DoubleU farg;                                                       \
    farg.ll = arg;                                                          \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);                    \
    if (unlikely(env->fp_status.float_exception_flags)) {                   \
        if (float64_is_any_nan(arg)) {                                      \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);           \
            if (float64_is_signaling_nan(arg)) {                            \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);      \
            }                                                               \
            farg.ll = nanval;                                               \
        } else if (env->fp_status.float_exception_flags &                   \
                   float_flag_invalid) {                                    \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);           \
        }                                                                   \
        helper_float_check_status(env);                                     \
    }                                                                       \
    return farg.ll;                                                         \
}
FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
#define FPU_FCFI(op, cvtr, is_single)                       \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)        \
{                                                           \
    CPU_DoubleU farg;                                       \
    if (is_single) {                                        \
        float32 tmp = cvtr(arg, &env->fp_status);           \
        farg.d = float32_to_float64(tmp, &env->fp_status);  \
    } else {                                                \
        farg.d = cvtr(arg, &env->fp_status);                \
    }                                                       \
    helper_float_check_status(env);                         \
    return farg.ll;                                         \
}
FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    helper_float_check_status(env);
    return farg.ll;
}
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                      uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }

    return farg1.ll;
}
uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                      uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }

    return farg1.ll;
}
/* fnmadd - fnmadd. */
uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                       uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }

    return farg1.ll;
}
/* fnmsub - fnmsub. */
uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                       uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }

    return farg1.ll;
}
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single precision */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN square root */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    return farg.ll;
}
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* frsqrte - frsqrte. */
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN reciprocal square root */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    return farg.ll;
}
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and
             * so must be denormalized.
             */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and
             * therefore must be denormalized.
             */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);
#define HELPER_SPE_VECTOR_CONV(name)                              \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)      \
    {                                                             \
        return ((uint64_t)e##name(env, val >> 32) << 32) |        \
            (uint64_t)e##name(env, val);                          \
    }
HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
#define HELPER_SPE_SINGLE_ARITH(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);
#define HELPER_SPE_VECTOR_ARITH(name)                                       \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2)  \
    {                                                                       \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |       \
            (uint64_t)e##name(env, op1, op2);                               \
    }
HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}
#define HELPER_SINGLE_SPE_CMP(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2) << 2;                               \
    }
HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
#define HELPER_VECTOR_SPE_CMP(name)                                        \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                           e##name(env, op1, op2));                        \
    }
HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversions */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}
uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}
uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}
uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
/* Double-precision floating-point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}
uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}
#define DECODE_SPLIT(opcode, shift1, nb1, shift2, nb2) \
    (((((opcode) >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \
     (((opcode) >> (shift2)) & ((1 << (nb2)) - 1)))

#define xT(opcode) DECODE_SPLIT(opcode, 0, 1, 21, 5)
#define xA(opcode) DECODE_SPLIT(opcode, 2, 1, 16, 5)
#define xB(opcode) DECODE_SPLIT(opcode, 1, 1, 11, 5)
#define xC(opcode) DECODE_SPLIT(opcode, 3, 1, 6, 5)
#define BF(opcode) (((opcode) >> (31-8)) & 7)
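/*
 * Illustrative note (not in the original source): each VSX register number
 * is 6 bits split across the opcode.  xT(), for example, combines the TX
 * bit at opcode bit 0 (the high bit of the register number) with the 5-bit
 * T field at bits 21..25, so an opcode with TX=1 and T=3 names VSR 35.
 */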
typedef union _ppc_vsr_t {
    uint8_t u8[16];
    uint16_t u16[8];
    uint32_t u32[4];
    uint64_t u64[2];
    float32 f32[4];
    float64 f64[2];
} ppc_vsr_t;

#if defined(HOST_WORDS_BIGENDIAN)
#define VsrW(i) u32[i]
#define VsrD(i) u64[i]
#else
#define VsrW(i) u32[3-(i)]
#define VsrD(i) u64[1-(i)]
#endif
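/*
 * Illustrative note (not in the original source): VsrD(0) always names the
 * architecturally most significant doubleword of a VSR regardless of host
 * byte order; on a little-endian host it maps to u64[1], so the scalar VSX
 * helpers below can use VsrD(0) without any host-specific code.
 */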
static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
    if (n < 32) {
        vsr->VsrD(0) = env->fpr[n];
        vsr->VsrD(1) = env->vsr[n];
    } else {
        vsr->u64[0] = env->avr[n-32].u64[0];
        vsr->u64[1] = env->avr[n-32].u64[1];
    }
}
static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
    if (n < 32) {
        env->fpr[n] = vsr->VsrD(0);
        env->vsr[n] = vsr->VsrD(1);
    } else {
        env->avr[n-32].u64[0] = vsr->u64[0];
        env->avr[n-32].u64[1] = vsr->u64[1];
    }
}
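/*
 * Illustrative note (not in the original source): the VSX helpers below all
 * follow the same pattern around these accessors, roughly
 *
 *     ppc_vsr_t xt, xa, xb;
 *     getVSR(xA(opcode), &xa, env);
 *     getVSR(xB(opcode), &xb, env);
 *     ... operate on xa/xb into xt ...
 *     putVSR(xT(opcode), &xt, env);
 *
 * so registers 0..31 alias the FPR/VSR halves and 32..63 alias the AVRs.
 */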
#define float64_to_float64(x, env) x
/* VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld) ||                      \
                       tp##_is_signaling_nan(xb.fld)) {                      \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld, sfprf);                         \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}
VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld) ||                      \
                       tp##_is_signaling_nan(xb.fld)) {                      \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld, sfprf);                         \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}
VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);    \
            } else if (tp##_is_zero(xa.fld) &&                               \
                       tp##_is_zero(xb.fld)) {                               \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld) ||                      \
                       tp##_is_signaling_nan(xb.fld)) {                      \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld, sfprf);                         \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}
VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
/* VSX_RE - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_signaling_nan(xb.fld))) {                       \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);       \
        }                                                                    \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld, sfprf);                         \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}
VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld)) {                      \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld, sfprf);                         \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}
VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld)) {                      \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld, sfprf);                         \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}
VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xa, xb;                                                        \
    int i;                                                                   \
    int fe_flag = 0;                                                         \
    int fg_flag = 0;                                                         \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_infinity(xa.fld) ||                             \
                     tp##_is_infinity(xb.fld) ||                             \
                     tp##_is_zero(xb.fld))) {                                \
            fe_flag = 1;                                                     \
            fg_flag = 1;                                                     \
        } else {                                                             \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);                   \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);                   \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                          \
                         tp##_is_any_nan(xb.fld))) {                         \
                fe_flag = 1;                                                 \
            } else if ((e_b <= emin) || (e_b >= (emax-2))) {                 \
                fe_flag = 1;                                                 \
            } else if (!tp##_is_zero(xa.fld) &&                              \
                       (((e_a - e_b) >= emax) ||                             \
                        ((e_a - e_b) <= (emin+1)) ||                         \
                        (e_a <= (emin+nbits)))) {                            \
                fe_flag = 1;                                                 \
            }                                                                \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {                \
                /* XB is not zero because of the above check and */          \
                /* so must be denormalized.                       */         \
                fg_flag = 1;                                                 \
            }                                                                \
        }                                                                    \
    }                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);      \
}
VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                            \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xa, xb;                                                        \
    int i;                                                                   \
    int fe_flag = 0;                                                         \
    int fg_flag = 0;                                                         \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_infinity(xb.fld) ||                             \
                     tp##_is_zero(xb.fld))) {                                \
            fe_flag = 1;                                                     \
            fg_flag = 1;                                                     \
        } else {                                                             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);                   \
            if (unlikely(tp##_is_any_nan(xb.fld))) {                         \
                fe_flag = 1;                                                 \
            } else if (unlikely(tp##_is_zero(xb.fld))) {                     \
                fe_flag = 1;                                                 \
            } else if (unlikely(tp##_is_neg(xb.fld))) {                      \
                fe_flag = 1;                                                 \
            } else if (!tp##_is_zero(xb.fld) &&                              \
                       (e_b <= (emin+nbits))) {                              \
                fe_flag = 1;                                                 \
            }                                                                \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {                \
                /* XB is not zero because of the above check and */          \
                /* therefore must be denormalized.                */         \
                fg_flag = 1;                                                 \
            }                                                                \
        }                                                                    \
    }                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);      \
}
VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
/* VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   afrm     - A form (1=A, 0=M)
 *   sfprf    - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                         \
    ppc_vsr_t *b, *c;                                                        \
    int i;                                                                   \
    if (afrm) { /* AxB + T */                                                \
        b = &xb;                                                             \
        c = &xt_in;                                                          \
    } else { /* AxT + B */                                                   \
        b = &xt_in;                                                          \
        c = &xb;                                                             \
    }                                                                        \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt_in, env);                                         \
    xt_out = xt_in;                                                          \
    helper_reset_fpstatus(env);                                              \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /* Avoid double rounding errors by rounding the intermediate */  \
            /* result to odd.                                            */  \
            set_float_rounding_mode(float_round_to_zero, &tstat);            \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                 \
                                     maddflgs, &tstat);                      \
            xt_out.fld |= (get_float_exception_flags(&tstat) &               \
                           float_flag_inexact) != 0;                         \
        } else {                                                             \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                 \
                                     maddflgs, &tstat);                      \
        }                                                                    \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_signaling_nan(xa.fld) ||                             \
                tp##_is_signaling_nan(b->fld) ||                             \
                tp##_is_signaling_nan(c->fld)) {                             \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
                tstat.float_exception_flags &= ~float_flag_invalid;          \
            }                                                                \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) ||        \
                (tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) {        \
                xt_out.fld = float64_to_##tp(fload_invalid_op_excp(env,      \
                    POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status);         \
                tstat.float_exception_flags &= ~float_flag_invalid;          \
            }                                                                \
            if ((tstat.float_exception_flags & float_flag_invalid) &&        \
                ((tp##_is_infinity(xa.fld) ||                                \
                  tp##_is_infinity(b->fld)) &&                               \
                  tp##_is_infinity(c->fld))) {                               \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            }                                                                \
        }                                                                    \
        if (r2sp) {                                                          \
            xt_out.fld = helper_frsp(env, xt_out.fld);                       \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt_out.fld, sfprf);                     \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt_out, env);                                        \
    helper_float_check_status(env);                                          \
}
#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
#define VSX_SCALAR_CMP(op, ordered)                                          \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xa, xb;                                                        \
    uint32_t cc = 0;                                                         \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                           \
                 float64_is_any_nan(xb.VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa.VsrD(0)) ||                          \
            float64_is_signaling_nan(xb.VsrD(0))) {                          \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
        }                                                                    \
        if (ordered) {                                                       \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
        }                                                                    \
        cc = 1;                                                              \
    } else {                                                                 \
        if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
            cc = 8;                                                          \
        } else if (!float64_le(xa.VsrD(0), xb.VsrD(0),                       \
                               &env->fp_status)) {                           \
            cc = 4;                                                          \
        } else {                                                             \
            cc = 2;                                                          \
        }                                                                    \
    }                                                                        \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                     \
    env->fpscr |= cc << FPSCR_FPRF;                                          \
    env->crf[BF(opcode)] = cc;                                               \
    helper_float_check_status(env);                                          \
}
VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
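
/* Both macros quiet a signaling NaN by setting the most-significant bit of
 * the fraction, which is the "quiet" bit in the IEEE NaN encoding used by
 * PowerPC.
 */
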
/* VSX_MAX_MIN - VSX floating point maximum/minimum
 * name - instruction mnemonic
 * op - operation (max or min)
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
        if (unlikely(tp##_is_signaling_nan(xa.fld) ||                         \
                     tp##_is_signaling_nan(xb.fld))) {                        \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
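
/* The max/min helpers rely on softfloat's maxnum/minnum, which follow the
 * IEEE 754-2008 maxNum/minNum rules: if exactly one operand is a NaN, the
 * numeric operand is returned.  A signaling NaN additionally sets VXSNAN in
 * the macro above, but the value written to xT is still the maxnum/minnum
 * result.
 */
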
/* VSX_CMP - VSX floating point compare
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * cmp - comparison operation
 * svxvc - set VXVC bit
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
    int all_true = 1;                                                         \
    int all_false = 1;                                                        \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                               \
                     tp##_is_any_nan(xb.fld))) {                              \
            if (tp##_is_signaling_nan(xa.fld) ||                              \
                tp##_is_signaling_nan(xb.fld)) {                              \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);        \
            }                                                                 \
            if (svxvc) {                                                      \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);          \
            }                                                                 \
            xt.fld = 0;                                                       \
            all_true = 0;                                                     \
        } else if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == 1) {        \
            xt.fld = -1;                                                      \
            all_false = 0;                                                    \
        } else {                                                              \
            xt.fld = 0;                                                       \
            all_true = 0;                                                     \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    if ((opcode >> (31-21)) & 1) {                                            \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);           \
    }                                                                         \
    helper_float_check_status(env);                                           \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1)
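
/* The comparison is evaluated as cmp(xB, xA), so "greater than or equal" is
 * implemented with le and "greater than" with lt.  Each element of xT becomes
 * an all-ones or all-zeros mask, and when bit 21 of the opcode (the record
 * form) is set, CR6 receives the all-true/all-false summary.  Only the ge/gt
 * compares treat a NaN operand as an invalid comparison (svxvc = 1); eq does
 * not.
 */
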
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field (f32 or f64)
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)               \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                             \
    ppc_vsr_t xt, xb;                                                         \
    int i;                                                                    \
                                                                              \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                   \
        if (unlikely(stp##_is_signaling_nan(xb.sfld))) {                      \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                            \
        }                                                                     \
        if (sfprf) {                                                          \
            helper_compute_fprf(env, ttp##_to_float64(xt.tfld,                \
                                &env->fp_status), sfprf);                     \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
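
/* For the vector dp<->sp forms the single-precision value of element i lives
 * in the even word of doubleword i, hence the VsrW(2*i) indexing; the scalar
 * forms (sfprf = 1) also update FPSCR[FPRF].
 */
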
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
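
/* The "n" (non-signalling) conversions above deliberately work on a local
 * copy of fp_status: any exception flags raised by the conversion are
 * discarded rather than folded back into FPSCR, and no program interrupt can
 * result.
 */
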
/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (int32, uint32, int64 or uint64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)               \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                             \
    ppc_vsr_t xt, xb;                                                         \
    int i;                                                                    \
                                                                              \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(stp##_is_any_nan(xb.sfld))) {                            \
            if (stp##_is_signaling_nan(xb.sfld)) {                            \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);        \
            }                                                                 \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);             \
            xt.tfld = rnan;                                                   \
        } else {                                                              \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                 \
                                                     &env->fp_status);        \
            if (env->fp_status.float_exception_flags & float_flag_invalid) {  \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);         \
                xt.tfld = rnan;                                               \
            }                                                                 \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
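
/* These conversions always truncate (round toward zero).  A NaN source, or a
 * value outside the target range, raises VXCVI and stores the rnan pattern:
 * the most negative integer for signed targets and zero for unsigned ones.
 */
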
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (int32, uint32, int64 or uint64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * sfprf - set FPRF
 * r2sp - round the intermediate result to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)        \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                             \
    ppc_vsr_t xt, xb;                                                         \
    int i;                                                                    \
                                                                              \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                   \
        if (r2sp) {                                                           \
            xt.tfld = helper_frsp(env, xt.tfld);                              \
        }                                                                     \
        if (sfprf) {                                                          \
            helper_compute_fprf(env, xt.tfld, sfprf);                         \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
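
/* The scalar xscv*dsp forms convert to double precision and then pass the
 * result through helper_frsp (r2sp = 1) so the stored value is correctly
 * rounded to single precision; only the scalar forms (sfprf = 1) update
 * FPSCR[FPRF].
 */
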
2638 /* For "use current rounding mode", define a value that will not be one of
2639 * the existing rounding model enums.
2641 #define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
2642 float_round_up + float_round_to_zero)
/* VSX_ROUND - VSX floating point round
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * rmode - rounding mode
 * sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                            \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                             \
    ppc_vsr_t xt, xb;                                                         \
    int i;                                                                    \
                                                                              \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    if (rmode != FLOAT_ROUND_CURRENT) {                                       \
        set_float_rounding_mode(rmode, &env->fp_status);                      \
    }                                                                         \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(tp##_is_signaling_nan(xb.fld))) {                        \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
            xt.fld = tp##_snan_to_qnan(xb.fld);                               \
        } else {                                                              \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);              \
        }                                                                     \
        if (sfprf) {                                                          \
            helper_compute_fprf(env, xt.fld, sfprf);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    /* If this is not a "use current rounding mode" instruction,              \
     * then inhibit setting of the XX bit and restore rounding                \
     * mode from FPSCR */                                                     \
    if (rmode != FLOAT_ROUND_CURRENT) {                                       \
        fpscr_set_rounding_mode(env);                                         \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;          \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_nearest_even, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_nearest_even, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_nearest_even, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
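
/* The mnemonic suffixes map onto the rounding mode argument used above:
 * plain "i" rounds to nearest (ties to even), "ic" uses the current FPSCR
 * mode, "im" rounds toward minus infinity, "ip" toward plus infinity and
 * "iz" toward zero.  The explicit-mode forms restore the FPSCR rounding mode
 * and suppress the inexact flag at the end of VSX_ROUND.
 */
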
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf(env, xt, 1);
    helper_float_check_status(env);