/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "fpu/softfloat.h"
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
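/*
 * Illustrative note (not part of the original file): quieting a signaling
 * NaN only sets the most-significant fraction bit of the format, so for
 * float64 the sNaN pattern 0x7FF0000000000001 becomes the qNaN
 * 0x7FF8000000000001:
 *
 *     uint64_t snan = 0x7FF0000000000001ULL;
 *     uint64_t qnan = float64_snan_to_qnan(snan);  // 0x7FF8000000000001
 */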
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
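/*
 * Illustrative note (not part of the original file): these return the
 * unbiased (true) exponent of the encoding.  For example, float64 1.0
 * (0x3FF0000000000000) has a biased exponent field of 0x3FF, so
 * ppc_float64_get_unbiased_exp() yields 0x3FF - 1023 = 0.
 */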
#define COMPUTE_FPRF(tp)                                       \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
    isneg = tp##_is_neg(arg);                                  \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        if (tp##_is_signaling_nan(arg, &env->fp_status)) {     \
            /* Signaling NaN: flags are undefined */           \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        if (tp##_is_zero(arg)) {                               \
        if (tp##_is_zero_or_denormal(arg)) {                   \
            /* Denormalized numbers */                         \
            /* Normalized numbers */                           \
    /* We update FPSCR_FPRF */                                 \
    env->fpscr &= ~(0x1F << FPSCR_FPRF);                       \
    env->fpscr |= fprf << FPSCR_FPRF;                          \

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
/* Floating-point invalid operations exception */
static inline __attribute__((__always_inline__))
uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* Exception is deferred */
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        /* Set the result to quiet NaN */
        ret = 0x7FF8000000000000ULL;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        /* Set the result to quiet NaN */
        ret = 0x7FF8000000000000ULL;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    if (fp_exceptions_enabled(env)) {
        /* GETPC() works here because this is inline */
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_FP | op, GETPC());
    }
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    if (fp_exceptions_enabled(env)) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                               raddr);
    }
}

static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    /* XXX: should adjust the result */
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    /* We must update the target FPR before raising the exception */
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    env->fpscr |= 1 << FPSCR_XX;
    env->fpscr |= 1 << FPSCR_FI;
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    /* XXX: should adjust the result */
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    /* We must update the target FPR before raising the exception */
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_FI;
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    /* Update the floating-point enabled exception summary */
    env->fpscr |= 1 << FPSCR_FEX;
    /* We must update the target FPR before raising the exception */
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
}
static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
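/*
 * Illustrative note (not part of the original file): fpscr_set_rounding_mode
 * re-derives the softfloat rounding mode from the current FPSCR[RN] field.
 * It is invoked whenever RN may have changed (see helper_fpscr_clrbit,
 * helper_fpscr_setbit and helper_store_fpscr below); for example, once the
 * guest sets RN = 1, later float64_add() or float64_round_to_int() calls on
 * env->fp_status round toward zero until RN changes again.
 */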
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);

            fpscr_set_rounding_mode(env);

            /* Set VX bit to zero */
            env->fpscr &= ~(1 << FPSCR_VX);

            /* Clear the FEX bit */
            env->fpscr &= ~(1 << FPSCR_FEX);
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;

            env->fpscr |= 1 << FPSCR_VX;

            env->error_code = POWERPC_EXCP_FP;
                env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                env->error_code |= POWERPC_EXCP_FP_VXISI;
                env->error_code |= POWERPC_EXCP_FP_VXIDI;
                env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                env->error_code |= POWERPC_EXCP_FP_VXVC;
                env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                env->error_code |= POWERPC_EXCP_FP_VXCVI;

            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;

            fpscr_set_rounding_mode(env);

        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We have to update Rc1 before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}
void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);
    bool inexact_happened = false;

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
        inexact_happened = true;
    }

    /* if the inexact flag was not set */
    if (inexact_happened == false) {
        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}
static inline __attribute__((__always_inline__))
void float_check_status(CPUPPCState *env)
{
    /* GETPC() works here because this is inline */
    do_float_check_status(env, GETPC());
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
            /* Magnitude subtraction of infinities */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    return ret;
}

float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
            /* Magnitude subtraction of infinities */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    return ret;
}

float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if ((float64_is_infinity(arg1) && float64_is_zero(arg2)) ||
            (float64_is_zero(arg1) && float64_is_infinity(arg2))) {
            /* Multiplication of zero by infinity */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            /* sNaN multiplication */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    return ret;
}

float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            /* Determine what kind of invalid operation was seen. */
            if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
                /* Division of infinity by infinity */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
            } else if (float64_is_zero(arg1) && float64_is_zero(arg2)) {
                /* Division of zero by zero */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
            } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                       float64_is_signaling_nan(arg2, &env->fp_status)) {
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
        }
    }

    return ret;
}
#define FPU_FCTI(op, cvt, nanval) \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status); \
    if (unlikely(env->fp_status.float_exception_flags)) { \
        if (float64_is_any_nan(arg)) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
            if (float64_is_signaling_nan(arg, &env->fp_status)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
        } else if (env->fp_status.float_exception_flags & \
                   float_flag_invalid) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
    float_check_status(env); \

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
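/*
 * Illustrative note (not part of the original file): each FPU_FCTI
 * instantiation expands to one conversion helper.  For example,
 * FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U) defines
 * helper_fctiwz(), which calls float64_to_int32_round_to_zero(); the third
 * argument is presumably the value substituted for the result when the
 * source is a NaN (the lines using it are not shown in this excerpt).
 */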
#define FPU_FCFI(op, cvtr, is_single) \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
        float32 tmp = cvtr(arg, &env->fp_status); \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
        farg.d = cvtr(arg, &env->fp_status); \
    float_check_status(env); \

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    float_check_status(env);
    return farg.ll;
}
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
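/*
 * Illustrative note (not part of the original file): frin rounds to the
 * nearest integral value with ties away from zero, so do_fri() on 2.5 with
 * float_round_ties_away yields 3.0, while friz truncates toward zero, so
 * 2.9 becomes 2.0 (values shown for readability; the helpers actually take
 * the raw float64 bit patterns).
 */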
#define FPU_MADDSUB_UPDATE(NAME, TP) \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3, \
                 unsigned int madd_flags) \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) || \
        TP##_is_signaling_nan(arg2, &env->fp_status) || \
        TP##_is_signaling_nan(arg3, &env->fp_status)) { \
        /* sNaN operation */ \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) || \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) { \
        /* Multiplication of zero by infinity */ \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) && \
        TP##_is_infinity(arg3)) { \
        uint8_t aSign, bSign, cSign; \
        aSign = TP##_is_neg(arg1); \
        bSign = TP##_is_neg(arg2); \
        cSign = TP##_is_neg(arg3); \
        if (madd_flags & float_muladd_negate_c) { \
        if (aSign ^ bSign ^ cSign) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); \

FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
#define FPU_FMADD(op, madd_flags) \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \
                     uint64_t arg2, uint64_t arg3) \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags, \
                                 &env->fp_status); \
    flags = get_float_exception_flags(&env->fp_status); \
    if (flags & float_flag_invalid) { \
        float64_maddsub_update_excp(env, arg1, arg2, arg3, \
                                    madd_flags); \
    float_check_status(env); \

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
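/*
 * Illustrative note (not part of the original file): the flag sets map the
 * four fused forms onto float64_muladd().  For example, fnmsub uses
 * NMSUB_FLGS = float_muladd_negate_c | float_muladd_negate_result, which
 * computes -((arg1 * arg2) + (-arg3)) = -(arg1 * arg2 - arg3), matching the
 * PowerPC fnmsub definition apart from the NaN and sign details handled in
 * float64_maddsub_update_excp() above.
 */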
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round to single precision */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    return farg.ll;
}

uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN reciprocal square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    return farg.ll;
}
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= (-1022 + 52)))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
#define HELPER_SPE_SINGLE_CONV(name) \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
        return e##name(env, val); \

HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name) \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
        return ((uint64_t)e##name(env, val >> 32) << 32) | \
               (uint64_t)e##name(env, val); \

HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
#define HELPER_SPE_SINGLE_ARITH(name) \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
        return e##name(env, op1, op2); \

HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name) \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \
               (uint64_t)e##name(env, op1, op2); \

HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name) \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
        return e##name(env, op1, op2); \

HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name) \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32), \
                           e##name(env, op1, op2)); \

HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);
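/*
 * Illustrative note (not part of the original file): evcmp_merge() packs
 * the per-lane results of the two 32-bit halves into one CR-style nibble.
 * For example, with t0 = 1 (high lane true) and t1 = 0 (low lane false) it
 * returns (1 << 3) | (0 << 2) | (1 << 1) | 0 = 0xA, i.e. "high set, low
 * clear, any set, not all set".
 */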
/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double precision floating point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}
#define float64_to_float64(x, env) x

/* VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
void helper_##name(CPUPPCState *env, uint32_t opcode) \
    ppc_vsr_t xt, xa, xb; \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat); \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
                       tp##_is_signaling_nan(xb.fld, &tstat)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
            xt.fld = helper_frsp(env, xt.fld); \
            helper_compute_fprf_float64(env, xt.fld); \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
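/*
 * Illustrative note (not part of the original file): each line above
 * instantiates one helper.  For example, VSX_ADD_SUB(xsadddp, add, 1,
 * float64, VsrD(0), 1, 0) expands to void helper_xsadddp(CPUPPCState *env,
 * uint32_t opcode), a scalar double-precision add on VsrD(0) that also
 * updates FPSCR[FPRF] (sfprf = 1), while the xv* forms loop over two
 * float64 or four float32 elements without touching FPRF.
 */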
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
1801 * op - instruction mnemonic
1802 * nels - number of elements (1, 2 or 4)
1803 * tp - type (float32 or float64)
1804 * fld - vsr_t field (VsrD(*) or VsrW(*))
1807 #define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
1808 void helper_##op(CPUPPCState *env, uint32_t opcode) \
1810 ppc_vsr_t xt, xa, xb; \
1813 getVSR(xA(opcode), &xa, env); \
1814 getVSR(xB(opcode), &xb, env); \
1815 getVSR(xT(opcode), &xt, env); \
1816 helper_reset_fpstatus(env); \
1818 for (i = 0; i < nels; i++) { \
1819 float_status tstat = env->fp_status; \
1820 set_float_exception_flags(0, &tstat); \
1821 xt.fld = tp##_mul(xa.fld, xb.fld, &tstat); \
1822 env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1824 if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
1825 if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) || \
1826 (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) { \
1827 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf); \
1828 } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
1829 tp##_is_signaling_nan(xb.fld, &tstat)) { \
1830 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
1835 xt.fld = helper_frsp(env, xt.fld); \
1839 helper_compute_fprf_float64(env, xt.fld); \
1843 putVSR(xT(opcode), &xt, env); \
1844 float_check_status(env); \
1847 VSX_MUL(xsmuldp
, 1, float64
, VsrD(0), 1, 0)
1848 VSX_MUL(xsmulsp
, 1, float64
, VsrD(0), 1, 1)
1849 VSX_MUL(xvmuldp
, 2, float64
, VsrD(i
), 0, 0)
1850 VSX_MUL(xvmulsp
, 4, float32
, VsrW(i
), 0, 0)
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
            (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }
    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
    ppc_vsr_t xt, xa, xb; \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat); \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf); \
            } else if (tp##_is_zero(xa.fld) && \
                       tp##_is_zero(xb.fld)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf); \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
                       tp##_is_signaling_nan(xb.fld, &tstat)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { \
            float_zero_divide_excp(env, GETPC()); \
            xt.fld = helper_frsp(env, xt.fld); \
            helper_compute_fprf_float64(env, xt.fld); \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
        } else if (float128_is_zero(xa.f128) &&
                   float128_is_zero(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
/* VSX_RE - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status); \
            xt.fld = helper_frsp(env, xt.fld); \
            helper_compute_fprf_float64(env, xt.fld); \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)

/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        xt.fld = tp##_sqrt(xb.fld, &tstat); \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
            xt.fld = helper_frsp(env, xt.fld); \
            helper_compute_fprf_float64(env, xt.fld); \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        xt.fld = tp##_sqrt(xb.fld, &tstat); \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat); \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
            xt.fld = helper_frsp(env, xt.fld); \
            helper_compute_fprf_float64(env, xt.fld); \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)

/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_infinity(xa.fld) || \
                     tp##_is_infinity(xb.fld) || \
                     tp##_is_zero(xb.fld))) { \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld); \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
            if (unlikely(tp##_is_any_nan(xa.fld) || \
                         tp##_is_any_nan(xb.fld))) { \
            } else if ((e_b <= emin) || (e_b >= (emax-2))) { \
            } else if (!tp##_is_zero(xa.fld) && \
                       (((e_a - e_b) >= emax) || \
                        ((e_a - e_b) <= (emin+1)) || \
                        (e_a <= (emin+nbits)))) { \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
                /* XB is not zero because of the above check and */ \
                /* so must be denormalized. */ \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_infinity(xb.fld) || \
                     tp##_is_zero(xb.fld))) { \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
            if (unlikely(tp##_is_any_nan(xb.fld))) { \
            } else if (unlikely(tp##_is_zero(xb.fld))) { \
            } else if (unlikely(tp##_is_neg(xb.fld))) { \
            } else if (!tp##_is_zero(xb.fld) && \
                       (e_b <= (emin+nbits))) { \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
                /* XB is not zero because of the above check and */ \
                /* therefore must be denormalized. */ \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
/* VSX_MADD - VSX floating point multiply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *     various forms (madd, msub, nmadd, nmsub)
 *   afrm  - A form (1=A, 0=M)
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
    ppc_vsr_t xt_in, xa, xb, xt_out; \
    if (afrm) { /* AxB + T */ \
    } else { /* AxT + B */ \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt_in, env); \
    helper_reset_fpstatus(env); \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /* Avoid double rounding errors by rounding the intermediate */ \
            /* result to odd. */ \
            set_float_rounding_mode(float_round_to_zero, &tstat); \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
                                     maddflgs, &tstat); \
            xt_out.fld |= (get_float_exception_flags(&tstat) & \
                           float_flag_inexact) != 0; \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
                                     maddflgs, &tstat); \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            tp##_maddsub_update_excp(env, xa.fld, b->fld, c->fld, maddflgs); \
            xt_out.fld = helper_frsp(env, xt_out.fld); \
            helper_compute_fprf_float64(env, xt_out.fld); \
    putVSR(xT(opcode), &xt_out, env); \
    float_check_status(env); \
VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
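/*
 * Illustrative note (not part of the original file): for the
 * single-precision (r2sp) variants the VSX_MADD macro rounds the
 * intermediate muladd result toward zero and then ORs the inexact flag
 * into its least-significant bit, i.e. it rounds to odd, so that the final
 * helper_frsp() rounding cannot introduce a double-rounding error.
 */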
2335 /* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
2336 * op - instruction mnemonic
2337 * cmp - comparison operation
2338 * exp - expected result of comparison
2339 * svxvc - set VXVC bit
2341 #define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
2342 void helper_##op(CPUPPCState *env, uint32_t opcode) \
2344 ppc_vsr_t xt, xa, xb; \
2345 bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
2347 getVSR(xA(opcode), &xa, env); \
2348 getVSR(xB(opcode), &xb, env); \
2349 getVSR(xT(opcode), &xt, env); \
2351 if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
2352 float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
2353 vxsnan_flag = true; \
2354 if (fpscr_ve == 0 && svxvc) { \
2357 } else if (svxvc) { \
2358 vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) || \
2359 float64_is_quiet_nan(xb.VsrD(0), &env->fp_status); \
2361 if (vxsnan_flag) { \
2362 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2365 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
2367 vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
2370 if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) { \
2378 putVSR(xT(opcode), &xt, env); \
2379 helper_float_check_status(env); \
VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
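
/*
 * For illustration: VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1) expands to
 * helper_xscmpgtdp(), whose core test is
 *
 *     float64_lt(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == 1
 *
 * i.e. the compare is true exactly when xB < xA (xA > xB); with svxvc = 1 a
 * quiet NaN operand also raises VXVC, while xscmpeqdp/xscmpnedp pass
 * svxvc = 0 and only raise VXSNAN for signaling NaNs.
 */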
void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(xA(opcode), &xa, env);
    getVSR(xB(opcode), &xb, env);

    exp_a = extract64(xa.VsrD(0), 52, 11);
    exp_b = extract64(xb.VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
                 float64_is_any_nan(xb.VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}
void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);

    exp_a = extract64(xa.VsrD(0), 48, 15);
    exp_b = extract64(xb.VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa.f128) ||
                 float128_is_any_nan(xb.f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}
#define VSX_SCALAR_CMP(op, ordered) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xa, xb; \
    uint32_t cc = 0; \
    bool vxsnan_flag = false, vxvc_flag = false; \
    helper_reset_fpstatus(env); \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
        vxsnan_flag = true; \
        cc = CRF_SO; \
        if (fpscr_ve == 0 && ordered) { \
            vxvc_flag = true; \
        } \
    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) || \
               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) { \
        cc = CRF_SO; \
        if (ordered) { \
            vxvc_flag = true; \
        } \
    } \
    if (vxsnan_flag) { \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
    } \
    if (vxvc_flag) { \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
    } \
    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) { \
        cc |= CRF_LT; \
    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) { \
        cc |= CRF_GT; \
    } else { \
        cc |= CRF_EQ; \
    } \
    env->fpscr &= ~(0x0F << FPSCR_FPRF); \
    env->fpscr |= cc << FPSCR_FPRF; \
    env->crf[BF(opcode)] = cc; \
    float_check_status(env); \
}
VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
#define VSX_SCALAR_CMPQ(op, ordered) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xa, xb; \
    uint32_t cc = 0; \
    bool vxsnan_flag = false, vxvc_flag = false; \
    helper_reset_fpstatus(env); \
    getVSR(rA(opcode) + 32, &xa, env); \
    getVSR(rB(opcode) + 32, &xb, env); \
    if (float128_is_signaling_nan(xa.f128, &env->fp_status) || \
        float128_is_signaling_nan(xb.f128, &env->fp_status)) { \
        vxsnan_flag = true; \
        cc = CRF_SO; \
        if (fpscr_ve == 0 && ordered) { \
            vxvc_flag = true; \
        } \
    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) || \
               float128_is_quiet_nan(xb.f128, &env->fp_status)) { \
        cc = CRF_SO; \
        if (ordered) { \
            vxvc_flag = true; \
        } \
    } \
    if (vxsnan_flag) { \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
    } \
    if (vxvc_flag) { \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
    } \
    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) { \
        cc |= CRF_LT; \
    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) { \
        cc |= CRF_GT; \
    } else { \
        cc |= CRF_EQ; \
    } \
    env->fpscr &= ~(0x0F << FPSCR_FPRF); \
    env->fpscr |= cc << FPSCR_FPRF; \
    env->crf[BF(opcode)] = cc; \
    float_check_status(env); \
}
VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)
/* VSX_MAX_MIN - VSX floating point maximum/minimum
 * name - instruction mnemonic
 * op - operation (max or min)
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld) \
void helper_##name(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, xb; \
    int i; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status); \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}
VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
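
/*
 * For illustration: VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
 * expands to helper_xsmaxdp(), whose single iteration is essentially
 *
 *     xt.VsrD(0) = float64_maxnum(xa.VsrD(0), xb.VsrD(0), &env->fp_status);
 *
 * with VXSNAN raised when either input is a signaling NaN.
 */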
#define VSX_MAX_MINC(name, max) \
void helper_##name(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, xb; \
    bool vxsnan_flag = false, vex_flag = false; \
 \
    getVSR(rA(opcode) + 32, &xa, env); \
    getVSR(rB(opcode) + 32, &xb, env); \
    getVSR(rD(opcode) + 32, &xt, env); \
 \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) || \
                 float64_is_any_nan(xb.VsrD(0)))) { \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
            vxsnan_flag = true; \
        } \
        xt.VsrD(0) = xb.VsrD(0); \
    } else if ((max && \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) || \
               (!max && \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) { \
        xt.VsrD(0) = xa.VsrD(0); \
    } else { \
        xt.VsrD(0) = xb.VsrD(0); \
    } \
 \
    vex_flag = fpscr_ve & vxsnan_flag; \
    if (vxsnan_flag) { \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
    } \
    if (!vex_flag) { \
        putVSR(rD(opcode) + 32, &xt, env); \
    } \
}
VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
#define VSX_MAX_MINJ(name, max) \
void helper_##name(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, xb; \
    bool vxsnan_flag = false, vex_flag = false; \
 \
    getVSR(rA(opcode) + 32, &xa, env); \
    getVSR(rB(opcode) + 32, &xb, env); \
    getVSR(rD(opcode) + 32, &xt, env); \
 \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) { \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) { \
            vxsnan_flag = true; \
        } \
        xt.VsrD(0) = xa.VsrD(0); \
    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) { \
        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
            vxsnan_flag = true; \
        } \
        xt.VsrD(0) = xb.VsrD(0); \
    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) { \
        if (max) { \
            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
                xt.VsrD(0) = 0ULL; \
            } else { \
                xt.VsrD(0) = 0x8000000000000000ULL; \
            } \
        } else { \
            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) { \
                xt.VsrD(0) = 0x8000000000000000ULL; \
            } else { \
                xt.VsrD(0) = 0ULL; \
            } \
        } \
    } else if ((max && \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) || \
               (!max && \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) { \
        xt.VsrD(0) = xa.VsrD(0); \
    } else { \
        xt.VsrD(0) = xb.VsrD(0); \
    } \
 \
    vex_flag = fpscr_ve & vxsnan_flag; \
    if (vxsnan_flag) { \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
    } \
    if (!vex_flag) { \
        putVSR(rD(opcode) + 32, &xt, env); \
    } \
}
VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
/* VSX_CMP - VSX floating point compare
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * cmp - comparison operation
 * svxvc - set VXVC bit
 * exp - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, xb; \
    int i; \
    int all_true = 1; \
    int all_false = 1; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_any_nan(xa.fld) || \
                     tp##_is_any_nan(xb.fld))) { \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            } \
            if (svxvc) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
            } \
            xt.fld = 0; \
            all_true = 0; \
        } else { \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) { \
                xt.fld = -1; \
                all_false = 0; \
            } else { \
                xt.fld = 0; \
                all_true = 0; \
            } \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    if ((opcode >> (31-21)) & 1) { \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
    } \
    float_check_status(env); \
}
VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
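
/*
 * For illustration: VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
 * expands to helper_xvcmpgtdp(), which evaluates
 *
 *     float64_lt(xb.VsrD(i), xa.VsrD(i), &env->fp_status) == 1
 *
 * for i = 0..1, writing an all-ones or all-zeroes mask per element, and
 * summarises all_true/all_false in CR6 when the corresponding opcode bit
 * is set.
 */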
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field (f32 or f64)
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
 \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
        if (unlikely(stp##_is_signaling_nan(xb.sfld, \
                                            &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
        } \
        if (sfprf) { \
            helper_compute_fprf_##ttp(env, xt.tfld); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}
VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
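
/*
 * For illustration: VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32,
 * VsrD(0), VsrW(0), 1) expands to helper_xscvdpsp(), whose conversion step
 * is essentially
 *
 *     xt.VsrW(0) = float64_to_float32(xb.VsrD(0), &env->fp_status);
 *
 * with the result quietened via float32_snan_to_qnan() when the source is
 * a signaling NaN and, since sfprf = 1, FPRF updated from the converted
 * value.
 */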
/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field (f32 or f64)
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
 \
    getVSR(rB(opcode) + 32, &xb, env); \
    getVSR(rD(opcode) + 32, &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
        if (unlikely(stp##_is_signaling_nan(xb.sfld, \
                                            &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
        } \
        if (sfprf) { \
            helper_compute_fprf_##ttp(env, xt.tfld); \
        } \
    } \
 \
    putVSR(rD(opcode) + 32, &xt, env); \
    float_check_status(env); \
}
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type
 * ttp - target type
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
    getVSR(xB(opcode), &xb, env); \
    memset(&xt, 0, sizeof(xt)); \
 \
    for (i = 0; i < nels; i++) { \
        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status); \
        if (unlikely(stp##_is_signaling_nan(xb.sfld, \
                                            &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
        } \
        if (sfprf) { \
            helper_compute_fprf_##ttp(env, xt.tfld); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}
VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb.f128,
                                           &tstat))) {
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
    }
    helper_compute_fprf_float64(env, xt.VsrD(0));

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (int32, uint32, int64 or uint64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
 \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(stp##_is_any_nan(xb.sfld))) { \
            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            } \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
            xt.tfld = rnan; \
        } else { \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, \
                          &env->fp_status); \
            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
            } \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}
VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
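
/*
 * For illustration: VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64,
 * VsrD(0), VsrD(0), 0x8000000000000000ULL) expands to helper_xscvdpsxds(),
 * which performs
 *
 *     xt.VsrD(0) = float64_to_int64_round_to_zero(xb.VsrD(0), &env->fp_status);
 *
 * and substitutes the rnan pattern 0x8000000000000000ULL (raising VXCVI)
 * when the source is a NaN.
 */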
/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 * op - instruction mnemonic
 * stp - source type (float32 or float64)
 * ttp - target type (int32, uint32, int64 or uint64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
 \
    getVSR(rB(opcode) + 32, &xb, env); \
    memset(&xt, 0, sizeof(xt)); \
 \
    if (unlikely(stp##_is_any_nan(xb.sfld))) { \
        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
        } \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
        xt.tfld = rnan; \
    } else { \
        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, \
                      &env->fp_status); \
        if (env->fp_status.float_exception_flags & float_flag_invalid) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
        } \
    } \
 \
    putVSR(rD(opcode) + 32, &xt, env); \
    float_check_status(env); \
}
VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                  0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                  0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (int32, uint32, int64 or uint64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * jdef - definition of the j index (i or 2*i)
 * sfprf - set FPRF
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
 \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    for (i = 0; i < nels; i++) { \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
        if (r2sp) { \
            xt.tfld = helper_frsp(env, xt.tfld); \
        } \
        if (sfprf) { \
            helper_compute_fprf_float64(env, xt.tfld); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}
VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
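
/*
 * For illustration: VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64,
 * VsrD(0), VsrD(0), 1, 1) expands to helper_xscvsxdsp(): an
 * int64_to_float64() conversion that, because r2sp = 1, is additionally
 * rounded to single precision with helper_frsp() before FPRF is updated.
 */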
/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 * op - instruction mnemonic
 * stp - source type (int32, uint32, int64 or uint64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
 \
    getVSR(rB(opcode) + 32, &xb, env); \
    getVSR(rD(opcode) + 32, &xt, env); \
 \
    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
    helper_compute_fprf_##ttp(env, xt.tfld); \
 \
    putVSR(xT(opcode) + 32, &xt, env); \
    float_check_status(env); \
}
VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)
/* VSX_ROUND - VSX floating point round
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * rmode - rounding mode
 * sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    if (rmode != FLOAT_ROUND_CURRENT) { \
        set_float_rounding_mode(rmode, &env->fp_status); \
    } \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_signaling_nan(xb.fld, \
                                           &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.fld = tp##_snan_to_qnan(xb.fld); \
        } else { \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status); \
        } \
        if (sfprf) { \
            helper_compute_fprf_float64(env, xt.fld); \
        } \
    } \
 \
    /* If this is not a "use current rounding mode" instruction, \
     * then inhibit setting of the XX bit and restore rounding \
     * mode from FPSCR */ \
    if (rmode != FLOAT_ROUND_CURRENT) { \
        fpscr_set_rounding_mode(env); \
        env->fp_status.float_exception_flags &= ~float_flag_inexact; \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}
VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
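
/*
 * For illustration: VSX_ROUND(xsrdpim, 1, float64, VsrD(0),
 * float_round_down, 1) expands to helper_xsrdpim(), which is essentially
 *
 *     set_float_rounding_mode(float_round_down, &env->fp_status);
 *     xt.VsrD(0) = float64_round_to_int(xb.VsrD(0), &env->fp_status);
 *
 * followed by restoring the FPSCR rounding mode and discarding the inexact
 * flag; the *ic forms pass FLOAT_ROUND_CURRENT and skip the rounding-mode
 * switch entirely.
 */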
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    float_check_status(env);
    return xt;
}
#define VSX_XXPERM(op, indexed) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, pcv, xto; \
    int i, idx; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xT(opcode), &xt, env); \
    getVSR(xB(opcode), &pcv, env); \
 \
    for (i = 0; i < 16; i++) { \
        idx = pcv.VsrB(i) & 0x1F; \
        if (indexed) { \
            idx = 31 - idx; \
        } \
        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
    } \
    putVSR(xT(opcode), &xto, env); \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    uint32_t exp, i, fraction;

    getVSR(xB(opcode), &xb, env);
    memset(&xt, 0, sizeof(xt));

    for (i = 0; i < 4; i++) {
        exp = (xb.VsrW(i) >> 23) & 0xFF;
        fraction = xb.VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            xt.VsrW(i) = fraction | 0x00800000;
        } else {
            xt.VsrW(i) = fraction;
        }
    }
    putVSR(xT(opcode), &xt, env);
}
/* VSX_TEST_DC - VSX floating point test data class
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * xbn - VSR register number
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * tfld - target vsr_t field (VsrD(*) or VsrW(*))
 * fld_max - target field max
 * scrf - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    uint32_t i, sign, dcmx; \
    uint32_t cc, match = 0; \
 \
    getVSR(xbn, &xb, env); \
    if (!scrf) { \
        memset(&xt, 0, sizeof(xt)); \
        dcmx = DCMX_XV(opcode); \
    } else { \
        dcmx = DCMX(opcode); \
    } \
 \
    for (i = 0; i < nels; i++) { \
        sign = tp##_is_neg(xb.fld); \
        if (tp##_is_any_nan(xb.fld)) { \
            match = extract32(dcmx, 6, 1); \
        } else if (tp##_is_infinity(xb.fld)) { \
            match = extract32(dcmx, 4 + !sign, 1); \
        } else if (tp##_is_zero(xb.fld)) { \
            match = extract32(dcmx, 2 + !sign, 1); \
        } else if (tp##_is_zero_or_denormal(xb.fld)) { \
            match = extract32(dcmx, 0 + !sign, 1); \
        } \
 \
        if (scrf) { \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT; \
            env->fpscr &= ~(0x0F << FPSCR_FPRF); \
            env->fpscr |= cc << FPSCR_FPRF; \
            env->crf[BF(opcode)] = cc; \
        } else { \
            xt.tfld = match ? fld_max : 0; \
        } \
        match = 0; \
    } \
    if (!scrf) { \
        putVSR(xT(opcode), &xt, env); \
    } \
}
VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
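
/*
 * For illustration: VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i),
 * VsrD(i), UINT64_MAX, 0) expands to helper_xvtstdcdp(), which matches each
 * doubleword of xB against the DCMX bits (NaN, +/-infinity, +/-zero,
 * +/-denormal) and writes UINT64_MAX or 0 per element; the scrf = 1 forms
 * (xststdcdp, xststdcqp) set CR[BF] and FPCC instead of writing a mask.
 */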
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    getVSR(xB(opcode), &xb, env);
    dcmx = DCMX(opcode);
    exp = (xb.VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb.VsrD(0));
    if (float64_is_any_nan(xb.VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb.VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb.VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb.VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb.VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    xt.f128 = float128_round_to_int(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, xt.f128);
    float_check_status(env);
    putVSR(rD(opcode) + 32, &xt, env);
}
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb.f128, &tstat);
    xt.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sqrt(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            xt.f128 = float128_snan_to_qnan(xb.f128);
        } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
            xt.f128 = xb.f128;
        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
            xt.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}