/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
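
/*
 * Each snan_to_qnan helper quiets a signaling NaN by setting the
 * most-significant fraction bit, which is the IEEE 754 quiet bit in these
 * formats; the sign, exponent and remaining payload bits are preserved.
 */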
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
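
/*
 * Both helpers read the raw biased exponent field of an IEEE 754
 * single/double value and subtract the bias (127 or 1023); for zeros and
 * denormals the result is simply -127/-1023.  They back the ftdiv/ftsqrt
 * style "test for software assist" helpers further down.
 */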
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    int isneg;                                          \
    int fprf;                                           \
                                                        \
    isneg = tp##_is_neg(arg);                           \
    if (unlikely(tp##_is_any_nan(arg))) {               \
        if (tp##_is_signaling_nan(arg, &env->fp_status)) { \
            /* Signaling NaN: flags are undefined */    \
            fprf = 0x00;                                \
        } else {                                        \
            /* Quiet NaN */                             \
            fprf = 0x11;                                \
        }                                               \
    } else if (unlikely(tp##_is_infinity(arg))) {       \
        /* +/- infinity */                              \
        if (isneg) {                                    \
            fprf = 0x09;                                \
        } else {                                        \
            fprf = 0x05;                                \
        }                                               \
    } else {                                            \
        if (tp##_is_zero(arg)) {                        \
            /* +/- zero */                              \
            if (isneg) {                                \
                fprf = 0x12;                            \
            } else {                                    \
                fprf = 0x02;                            \
            }                                           \
        } else {                                        \
            if (tp##_is_zero_or_denormal(arg)) {        \
                /* Denormalized numbers */              \
                fprf = 0x10;                            \
            } else {                                    \
                /* Normalized numbers */                \
                fprf = 0x00;                            \
            }                                           \
            if (isneg) {                                \
                fprf |= 0x08;                           \
            } else {                                    \
                fprf |= 0x04;                           \
            }                                           \
        }                                               \
    }                                                   \
    /* We update FPSCR_FPRF */                          \
    env->fpscr &= ~(0x1F << FPSCR_FPRF);                \
    env->fpscr |= fprf << FPSCR_FPRF;                   \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
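
/*
 * FPRF is the 5-bit result-flags field (C plus FPCC).  The encodings set
 * above follow the Power ISA: 0x11 quiet NaN, 0x09/0x05 -/+ infinity,
 * 0x08/0x04 -/+ normal, 0x18/0x14 -/+ denormal, 0x12/0x02 -/+ zero.
 */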
/* Floating-point invalid operations exception */
static inline __attribute__((__always_inline__))
uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            /* GETPC() works here because this is inline */
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, GETPC());
        }
    }
    return ret;
}
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}
static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_FI;
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
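
/*
 * FPSCR[RN] maps onto the softfloat rounding modes as 0 = nearest-even,
 * 1 = toward zero, 2 = toward +infinity, 3 = toward -infinity; this
 * function is re-run whenever RN or the whole FPSCR is rewritten.
 */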
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* Set VX bit to zero */
                env->fpscr &= ~(1 << FPSCR_VX);
            }
            break;
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* Clear the FEX bit */
                env->fpscr &= ~(1 << FPSCR_FEX);
            }
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}
void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);
    bool inexact_happened = false;

    if (status & float_flag_divbyzero) {
        float_zero_divide_excp(env, raddr);
    } else if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
        inexact_happened = true;
    }

    /* if the inexact flag was not set */
    if (inexact_happened == false) {
        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}
static inline __attribute__((__always_inline__))
void float_check_status(CPUPPCState *env)
{
    /* GETPC() works here because this is inline */
    do_float_check_status(env, GETPC());
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
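
/*
 * Translated FP instructions call helper_reset_fpstatus() first so the
 * softfloat flags start clean for each operation; do_float_check_status()
 * then folds whatever the operation raised into the sticky FPSCR bits.
 */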
/* fadd - fadd. */
uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN addition */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
/* fsub - fsub. */
uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN subtraction */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
/* fmul - fmul. */
uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN multiplication */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
/* fdiv - fdiv. */
uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) &&
                 float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN division */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
#define FPU_FCTI(op, cvt, nanval)                                          \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                       \
{                                                                          \
    CPU_DoubleU farg;                                                      \
                                                                           \
    farg.ll = arg;                                                         \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);                   \
                                                                           \
    if (unlikely(env->fp_status.float_exception_flags)) {                  \
        if (float64_is_any_nan(arg)) {                                     \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);          \
            if (float64_is_signaling_nan(arg, &env->fp_status)) {          \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);     \
            }                                                              \
            farg.ll = nanval;                                              \
        } else if (env->fp_status.float_exception_flags &                  \
                   float_flag_invalid) {                                   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);          \
        }                                                                  \
        float_check_status(env);                                           \
    }                                                                      \
    return farg.ll;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
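
/*
 * nanval is the saturated result delivered when the source is a NaN: the
 * most negative representable value for the signed conversions and zero
 * for the unsigned ones, matching the Power ISA convert-to-integer rules.
 */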
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    float_check_status(env);                               \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    float_check_status(env);
    return farg.ll;
}
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
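
/*
 * The fri* instructions round to integer without ever setting FPSCR[XX]:
 * do_fri() samples the sticky inexact flag before rounding and clears any
 * freshly raised inexact afterwards, so only pre-existing inexact state
 * survives the operation.
 */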
#define FPU_MADDSUB_UPDATE(NAME, TP)                                 \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,        \
                 unsigned int madd_flags)                            \
{                                                                    \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||              \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||              \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {              \
        /* sNaN operation */                                         \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);       \
    }                                                                \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||            \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {            \
        /* Multiplication of zero by infinity */                     \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);        \
    }                                                                \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&        \
        TP##_is_infinity(arg3)) {                                    \
        uint8_t aSign, bSign, cSign;                                 \
                                                                     \
        aSign = TP##_is_neg(arg1);                                   \
        bSign = TP##_is_neg(arg2);                                   \
        cSign = TP##_is_neg(arg3);                                   \
        if (madd_flags & float_muladd_negate_c) {                    \
            cSign ^= 1;                                              \
        }                                                            \
        if (aSign ^ bSign ^ cSign) {                                 \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);    \
        }                                                            \
    }                                                                \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
#define FPU_FMADD(op, madd_flags)                                  \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,              \
                     uint64_t arg2, uint64_t arg3)                 \
{                                                                  \
    uint32_t flags;                                                \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,     \
                                 &env->fp_status);                 \
    flags = get_float_exception_flags(&env->fp_status);            \
    if (flags) {                                                   \
        if (flags & float_flag_invalid) {                          \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,     \
                                        madd_flags);               \
        }                                                          \
        float_check_status(env);                                   \
    }                                                              \
    return ret;                                                    \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
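
/*
 * The four variants differ only in the softfloat muladd flags: negating
 * the addend (float_muladd_negate_c) gives msub, negating the final
 * result gives nmadd, and both together give nmsub.
 */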
/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN conversion */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* fsqrt - fsqrt. */
uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
/* fre - fre. */
uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    return farg.ll;
}
/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* frsqrte - frsqrte. */
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN reciprocal square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }

    return farg.ll;
}
/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
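
/*
 * A sketch of how to read the returned CR field, based on the checks
 * above: the 0x8 bit is always set, 0x4 (fg) flags a denormalized
 * divisor, and 0x2 (fe) flags operand combinations (NaNs, extreme
 * exponents) for which a hardware divide would need software assistance.
 */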
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
#define HELPER_SPE_SINGLE_CONV(name)                          \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)   \
    {                                                         \
        return e##name(env, val);                             \
    }

HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);
#define HELPER_SPE_VECTOR_CONV(name)                              \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)      \
    {                                                             \
        return ((uint64_t)e##name(env, val >> 32) << 32) |        \
               (uint64_t)e##name(env, val);                       \
    }

HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
#define HELPER_SPE_SINGLE_ARITH(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }

HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);
#define HELPER_SPE_VECTOR_ARITH(name)                                      \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |      \
               (uint64_t)e##name(env, op1, op2);                           \
    }

HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}
#define HELPER_SINGLE_SPE_CMP(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }

HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
#define HELPER_VECTOR_SPE_CMP(name)                                        \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                           e##name(env, op1, op2));                        \
    }

HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);
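
/*
 * evcmp_merge() packs the two lane results into a CR field in the SPE
 * layout: bit 3 is the high lane, bit 2 the low lane, bit 1 their OR and
 * bit 0 their AND.
 */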
/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
/* Double-precision floating-point comparisons */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}
#define float64_to_float64(x, env) x
/* VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
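
/*
 * Each element is computed with a scratch float_status (tstat) so the
 * flags raised by this one operation can be inspected in isolation; the
 * scratch flags are then ORed back into the sticky env->fp_status.
 */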
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
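
/*
 * Note: when Rc(opcode) is set, the quad-precision operation above runs
 * with round-to-odd.  Round-to-odd keeps enough information that a later
 * rounding to a narrower format cannot suffer a double-rounding error.
 */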
/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
            (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }
    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);    \
            } else if (tp##_is_zero(xa.fld) &&                               \
                       tp##_is_zero(xb.fld)) {                               \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
        } else if (float128_is_zero(xa.f128) &&
                   float128_is_zero(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
/* VSX_RE - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                          \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
    helper_reset_fpstatus(env);                                         \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);  \
        }                                                               \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);           \
                                                                        \
        if (r2sp) {                                                     \
            xt.fld = helper_frsp(env, xt.fld);                          \
        }                                                               \
                                                                        \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, xt.fld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    float_check_status(env);                                            \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa.fld) ||                        \
                     tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
                         tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa.fld) &&                         \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /* XB is not zero because of the above check and */     \
                /* so must be denormalized. */                          \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb.fld))) {                \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb.fld) &&                         \
                       (e_b <= (emin + nbits))) {                       \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /* XB is not zero because of the above check and */     \
                /* therefore must be denormalized. */                   \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
/* VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   afrm     - A form (1=A, 0=M)
 *   sfprf    - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
    ppc_vsr_t *b, *c;                                                         \
    int i;                                                                    \
                                                                              \
    if (afrm) { /* AxB + T */                                                 \
        b = &xb;                                                              \
        c = &xt_in;                                                           \
    } else { /* AxT + B */                                                    \
        b = &xt_in;                                                           \
        c = &xb;                                                              \
    }                                                                         \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt_in, env);                                          \
                                                                              \
    xt_out = xt_in;                                                           \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /* Avoid double rounding errors by rounding the intermediate */   \
            /* result to odd. */                                              \
            set_float_rounding_mode(float_round_to_zero, &tstat);             \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                     maddflgs, &tstat);                       \
            xt_out.fld |= (get_float_exception_flags(&tstat) &                \
                           float_flag_inexact) != 0;                          \
        } else {                                                              \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                     maddflgs, &tstat);                       \
        }                                                                     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            tp##_maddsub_update_excp(env, xa.fld, b->fld, c->fld, maddflgs);  \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            xt_out.fld = helper_frsp(env, xt_out.fld);                        \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, xt_out.fld);                     \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt_out, env);                                         \
    float_check_status(env);                                                  \
}

VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
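
/*
 * For the single-precision forms the intermediate multiply-add is done in
 * double precision with rounding forced to zero, and the inexact flag is
 * ORed into the result's low bit; this makes the final round-to-single in
 * helper_frsp() behave as if only one rounding had occurred.
 */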
/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - comparison operation
 *   exp   - expected result of comparison
 *   svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
        vxsnan_flag = true;                                                   \
        if (fpscr_ve == 0 && svxvc) {                                         \
            vxvc_flag = true;                                                 \
        }                                                                     \
    } else if (svxvc) {                                                       \
        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
                    float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);        \
    }                                                                         \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (vxvc_flag) {                                                          \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                  \
    }                                                                         \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
                                                                              \
    if (!vex_flag) {                                                          \
        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
            xt.VsrD(0) = -1;                                                  \
            xt.VsrD(1) = 0;                                                   \
        } else {                                                              \
            xt.VsrD(0) = 0;                                                   \
            xt.VsrD(1) = 0;                                                   \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(xA(opcode), &xa, env);
    getVSR(xB(opcode), &xb, env);

    exp_a = extract64(xa.VsrD(0), 52, 11);
    exp_b = extract64(xb.VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
                 float64_is_any_nan(xb.VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}

void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);

    exp_a = extract64(xa.VsrD(0), 48, 15);
    exp_b = extract64(xb.VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa.f128) ||
                 float128_is_any_nan(xb.f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}

#define VSX_SCALAR_CMP(op, ordered)                                      \
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
{                                                                        \
    ppc_vsr_t xa, xb;                                                    \
    uint32_t cc = 0;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false;                         \
                                                                         \
    helper_reset_fpstatus(env);                                          \
    getVSR(xA(opcode), &xa, env);                                        \
    getVSR(xB(opcode), &xb, env);                                        \
                                                                         \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
        vxsnan_flag = true;                                              \
        cc = CRF_SO;                                                     \
        if (fpscr_ve == 0 && ordered) {                                  \
            vxvc_flag = true;                                            \
        }                                                                \
    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
        cc = CRF_SO;                                                     \
        if (ordered) {                                                   \
            vxvc_flag = true;                                            \
        }                                                                \
    }                                                                    \
    if (vxsnan_flag) {                                                   \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
    }                                                                    \
    if (vxvc_flag) {                                                     \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
    }                                                                    \
    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
        cc |= CRF_LT;                                                    \
    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
        cc |= CRF_GT;                                                    \
    } else {                                                             \
        cc |= CRF_EQ;                                                    \
    }                                                                    \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
    float_check_status(env);                                             \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)

#define VSX_SCALAR_CMPQ(op, ordered)                                    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    uint32_t cc = 0;                                                    \
    bool vxsnan_flag = false, vxvc_flag = false;                        \
                                                                        \
    helper_reset_fpstatus(env);                                         \
    getVSR(rA(opcode) + 32, &xa, env);                                  \
    getVSR(rB(opcode) + 32, &xb, env);                                  \
                                                                        \
    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||          \
        float128_is_signaling_nan(xb.f128, &env->fp_status)) {          \
        vxsnan_flag = true;                                             \
        cc = CRF_SO;                                                    \
        if (fpscr_ve == 0 && ordered) {                                 \
            vxvc_flag = true;                                           \
        }                                                               \
    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||       \
               float128_is_quiet_nan(xb.f128, &env->fp_status)) {       \
        cc = CRF_SO;                                                    \
        if (ordered) {                                                  \
            vxvc_flag = true;                                           \
        }                                                               \
    }                                                                   \
    if (vxsnan_flag) {                                                  \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);          \
    }                                                                   \
    if (vxvc_flag) {                                                    \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);            \
    }                                                                   \
    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {               \
        cc |= CRF_LT;                                                   \
    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {       \
        cc |= CRF_GT;                                                   \
    } else {                                                            \
        cc |= CRF_EQ;                                                   \
    }                                                                   \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                \
    env->fpscr |= cc << FPSCR_FPRF;                                     \
    env->crf[BF(opcode)] = cc;                                          \
    float_check_status(env);                                            \
}

VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)
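
/* The ordered compares (xscmpodp/xscmpoqp) raise VXVC for any NaN
 * operand, quiet or signaling; the unordered forms (xscmpudp/xscmpuqp)
 * never set VXVC.  In all four cases a NaN operand makes the result
 * unordered, which is reported as CRF_SO in the CR field and FPCC.
 */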

/* VSX_MAX_MIN - VSX floating point maximum/minimum
 * name - instruction mnemonic
 * op - operation (max or min)
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    float_check_status(env);                                                  \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
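
/* maxnum/minnum follow the IEEE 754-2008 maxNum/minNum rules: when
 * exactly one operand is a quiet NaN the other (numeric) operand is
 * returned, so e.g. xsmaxdp(QNaN, 2.0) yields 2.0.  Signaling NaN
 * inputs are still reported through VXSNAN in the loop above.
 */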

#define VSX_MAX_MINC(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                        \
    getVSR(rD(opcode) + 32, &xt, env);                                        \
                                                                              \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
                 float64_is_any_nan(xb.VsrD(0)))) {                           \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    } else if ((max &&                                                        \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
               (!max &&                                                       \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else {                                                                  \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        putVSR(rD(opcode) + 32, &xt, env);                                    \
    }                                                                         \
}

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);

#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                        \
    getVSR(rD(opcode) + 32, &xt, env);                                        \
                                                                              \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                           \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                    \
        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) {  \
        if (max) {                                                            \
            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
                xt.VsrD(0) = 0ULL;                                            \
            } else {                                                          \
                xt.VsrD(0) = 0x8000000000000000ULL;                           \
            }                                                                 \
        } else {                                                              \
            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {   \
                xt.VsrD(0) = 0x8000000000000000ULL;                           \
            } else {                                                          \
                xt.VsrD(0) = 0ULL;                                            \
            }                                                                 \
        }                                                                     \
    } else if ((max &&                                                        \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
               (!max &&                                                       \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else {                                                                  \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        putVSR(rD(opcode) + 32, &xt, env);                                    \
    }                                                                         \
}

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);

/* VSX_CMP - VSX floating point compare
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * cmp - comparison operation
 * svxvc - set VXVC bit
 * exp - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                         \
    ppc_vsr_t xt, xa, xb;                                                 \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    getVSR(xA(opcode), &xa, env);                                         \
    getVSR(xB(opcode), &xb, env);                                         \
    getVSR(xT(opcode), &xt, env);                                         \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
                     tp##_is_any_nan(xb.fld))) {                          \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
            }                                                             \
            xt.fld = 0;                                                   \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
                xt.fld = -1;                                              \
                all_false = 0;                                            \
            } else {                                                      \
                xt.fld = 0;                                               \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    putVSR(xT(opcode), &xt, env);                                         \
    if ((opcode >> (31-21)) & 1) {                                        \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
    }                                                                     \
    float_check_status(env);                                              \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
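
/* Each element of xt becomes an all-ones/all-zeroes predicate, as in
 * the scalar mask compares.  When the record bit (opcode bit 21) is
 * set, CR6 summarizes the result: 0x8 if the predicate held in every
 * element, 0x2 if it held in none, 0x0 otherwise.
 */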

/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field (f32 or f64)
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    getVSR(xT(opcode), &xt, env);                                  \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
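
/* For the vector forms the lanes do not map one-to-one: xvcvdpsp
 * stores the result for doubleword i into word 2*i (the even words),
 * and xvcvspdp conversely reads only the even words, matching how the
 * hardware pairs single-precision results with doubleword lanes.
 */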

/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field (f32 or f64)
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
                                                                       \
    getVSR(rB(opcode) + 32, &xb, env);                                 \
    getVSR(rD(opcode) + 32, &xt, env);                                 \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);            \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                   \
                                            &env->fp_status))) {       \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                     \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_##ttp(env, xt.tfld);                   \
        }                                                              \
    }                                                                  \
                                                                       \
    putVSR(rD(opcode) + 32, &xt, env);                                 \
    float_check_status(env);                                           \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)

/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type
 * ttp - target type
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    memset(&xt, 0, sizeof(xt));                                    \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)

/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb.f128,
                                           &tstat))) {
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
    }
    helper_compute_fprf_float64(env, xt.VsrD(0));

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
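
/* The "n" (non-signalling) variants convert on a local copy of
 * fp_status and discard its flags, so they can never modify FPSCR or
 * raise exceptions.  The single-precision value lives in the upper
 * half of the doubleword, hence the << 32 and >> 32 shifts.
 */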

/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (int32, uint32, int64 or uint64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
            }                                                                \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
            xt.tfld = rnan;                                                  \
        } else {                                                             \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
                          &env->fp_status);                                  \
            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
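
/* rnan is the value architecturally delivered for a NaN source: the
 * most negative integer (e.g. 0x8000000000000000ULL) for signed
 * targets and zero for unsigned ones.  Out-of-range numeric sources
 * are detected after the round-to-zero conversion via
 * float_flag_invalid and reported as VXCVI.
 */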

/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 * op - instruction mnemonic
 * stp - source type (float32 or float64)
 * ttp - target type (int32, uint32, int64 or uint64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
        }                                                                    \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);                \
        xt.tfld = rnan;                                                      \
    } else {                                                                 \
        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
                      &env->fp_status);                                      \
        if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                  0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                  0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)

/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (int32, uint32, int64 or uint64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * jdef - definition of the j index (i or 2*i)
 * sfprf - set FPRF
 * r2sp - round the result to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            xt.tfld = helper_frsp(env, xt.tfld);                        \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, xt.tfld);                  \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    float_check_status(env);                                            \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
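
/* The scalar single-precision forms (xscvsxdsp/xscvuxdsp) convert to
 * float64 and then round through helper_frsp (r2sp = 1), so the result
 * is correctly rounded to single precision while remaining in the
 * double-precision register format.
 */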

/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 * op - instruction mnemonic
 * stp - source type (int32, uint32, int64 or uint64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
                                                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                  \
    getVSR(rD(opcode) + 32, &xt, env);                                  \
                                                                        \
    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                 \
    helper_compute_fprf_##ttp(env, xt.tfld);                            \
                                                                        \
    putVSR(rD(opcode) + 32, &xt, env);                                  \
    float_check_status(env);                                            \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)

/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding model enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)

/* VSX_ROUND - VSX floating point round
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * rmode - rounding mode
 * sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
    getVSR(xB(opcode), &xb, env);                                      \
    getVSR(xT(opcode), &xt, env);                                      \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
                                           &env->fp_status))) {        \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
        } else {                                                       \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, xt.fld);                  \
        }                                                              \
    }                                                                  \
                                                                       \
    /* If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR */                                              \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        fpscr_set_rounding_mode(env);                                  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    putVSR(xT(opcode), &xt, env);                                      \
    float_check_status(env);                                           \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
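
/* Only the *ic forms round in the mode currently selected by FPSCR[RN]
 * (FLOAT_ROUND_CURRENT) and may therefore set XX on an inexact result;
 * the fixed-mode forms (i/m/p/z) suppress XX and restore the FPSCR
 * rounding mode afterwards, as the macro comment above notes.
 */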

uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    float_check_status(env);
    return xt;
}

#define VSX_XXPERM(op, indexed)                                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
{                                                                     \
    ppc_vsr_t xt, xa, pcv, xto;                                       \
    int i, idx;                                                       \
                                                                      \
    getVSR(xA(opcode), &xa, env);                                     \
    getVSR(xT(opcode), &xt, env);                                     \
    getVSR(xB(opcode), &pcv, env);                                    \
                                                                      \
    for (i = 0; i < 16; i++) {                                        \
        idx = pcv.VsrB(i) & 0x1F;                                     \
        if (indexed) {                                                \
            idx = 31 - idx;                                           \
        }                                                             \
        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
    }                                                                 \
    putVSR(xT(opcode), &xto, env);                                    \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)

void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    uint32_t exp, i, fraction;

    getVSR(xB(opcode), &xb, env);
    memset(&xt, 0, sizeof(xt));

    for (i = 0; i < 4; i++) {
        exp = (xb.VsrW(i) >> 23) & 0xFF;
        fraction = xb.VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            xt.VsrW(i) = fraction | 0x00800000;
        } else {
            xt.VsrW(i) = fraction;
        }
    }
    putVSR(xT(opcode), &xt, env);
}

/* VSX_TEST_DC - VSX floating point test data class
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * xbn - VSR register number
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * tfld - target vsr_t field (VsrD(*) or VsrW(*))
 * fld_max - target field max
 * scrf - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)               \
{                                                                 \
    ppc_vsr_t xt, xb;                                             \
    uint32_t i, sign, dcmx;                                       \
    uint32_t cc, match = 0;                                       \
                                                                  \
    getVSR(xbn, &xb, env);                                        \
    if (!scrf) {                                                  \
        memset(&xt, 0, sizeof(xt));                               \
        dcmx = DCMX_XV(opcode);                                   \
    } else {                                                      \
        dcmx = DCMX(opcode);                                      \
    }                                                             \
                                                                  \
    for (i = 0; i < nels; i++) {                                  \
        sign = tp##_is_neg(xb.fld);                               \
        if (tp##_is_any_nan(xb.fld)) {                            \
            match = extract32(dcmx, 6, 1);                        \
        } else if (tp##_is_infinity(xb.fld)) {                    \
            match = extract32(dcmx, 4 + !sign, 1);                \
        } else if (tp##_is_zero(xb.fld)) {                        \
            match = extract32(dcmx, 2 + !sign, 1);                \
        } else if (tp##_is_zero_or_denormal(xb.fld)) {            \
            match = extract32(dcmx, 0 + !sign, 1);                \
        }                                                         \
                                                                  \
        if (scrf) {                                               \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;        \
            env->fpscr &= ~(0x0F << FPSCR_FPRF);                  \
            env->fpscr |= cc << FPSCR_FPRF;                       \
            env->crf[BF(opcode)] = cc;                            \
        } else {                                                  \
            xt.tfld = match ? fld_max : 0;                        \
        }                                                         \
        match = 0;                                                \
    }                                                             \
    if (!scrf) {                                                  \
        putVSR(xT(opcode), &xt, env);                             \
    }                                                             \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
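
/* The DCMX bits tested above are, from bit 6 down to bit 0:
 *   NaN, +inf, -inf, +zero, -zero, +denormal, -denormal
 * so extract32(dcmx, 4 + !sign, 1), for instance, selects bit 5 (+inf)
 * for a positive infinity and bit 4 (-inf) for a negative one.
 */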

void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    getVSR(xB(opcode), &xb, env);
    dcmx = DCMX(opcode);
    exp = (xb.VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb.VsrD(0));
    if (float64_is_any_nan(xb.VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb.VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb.VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb.VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb.VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}

void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    xt.f128 = float128_round_to_int(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, xt.f128);
    float_check_status(env);
    putVSR(rD(opcode) + 32, &xt, env);
}

void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb.f128, &tstat);
    xt.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sqrt(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            xt.f128 = float128_snan_to_qnan(xb.f128);
        } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
            xt.f128 = xb.f128;
        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
            xt.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}