/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"

static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
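
/*
 * Editorial note: each *_snan_to_qnan form ORs in the most-significant
 * fraction bit of the respective IEEE 754 binary interchange format, which
 * is the "quiet" bit (bit 51 for float64, bit 22 for float32, bit 9 for
 * float16); the float128 helper above does the same for bit 47 of the
 * high doubleword.
 */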

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}

#define COMPUTE_FPRF(tp) \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{ \
    int isneg; \
    int fprf; \
 \
    isneg = tp##_is_neg(arg); \
    if (unlikely(tp##_is_any_nan(arg))) { \
        if (tp##_is_signaling_nan(arg, &env->fp_status)) { \
            /* Signaling NaN: flags are undefined */ \
            fprf = 0x00; \
        } else { \
            /* Quiet NaN */ \
            fprf = 0x11; \
        } \
    } else if (unlikely(tp##_is_infinity(arg))) { \
        /* +/- infinity */ \
        if (isneg) { \
            fprf = 0x09; \
        } else { \
            fprf = 0x05; \
        } \
    } else { \
        if (tp##_is_zero(arg)) { \
            /* +/- zero */ \
            if (isneg) { \
                fprf = 0x12; \
            } else { \
                fprf = 0x02; \
            } \
        } else { \
            if (tp##_is_zero_or_denormal(arg)) { \
                /* Denormalized numbers */ \
                fprf = 0x10; \
            } else { \
                /* Normalized numbers */ \
                fprf = 0x00; \
            } \
            if (isneg) { \
                fprf |= 0x08; \
            } else { \
                fprf |= 0x04; \
            } \
        } \
    } \
    /* We update FPSCR_FPRF */ \
    env->fpscr &= ~(0x1F << FPSCR_FPRF); \
    env->fpscr |= fprf << FPSCR_FPRF; \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
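
/*
 * For reference, the 5-bit FPRF encodings produced above are:
 *   0x11 quiet NaN, 0x09 -infinity, 0x05 +infinity, 0x12 -zero,
 *   0x02 +zero, 0x18 -denormal, 0x14 +denormal, 0x08 -normal,
 *   0x04 +normal (signaling NaNs leave the flags undefined).
 */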

/* Floating-point invalid operations exception */
static inline __attribute__((__always_inline__))
uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            /* GETPC() works here because this is inline */
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, GETPC());
        }
    }
    return ret;
}
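
/*
 * Editorial note: when the matching FPSCR enable bit is clear, the
 * arithmetic cases above hand back the default quiet NaN
 * (0x7FF8000000000000ULL) so the caller can store it into the target FPR;
 * when it is set, a program interrupt is raised immediately if
 * MSR[FE0]|MSR[FE1] is nonzero, otherwise it is deferred through
 * cs->exception_index.
 */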

static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}

static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}

void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}

static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_divbyzero) {
        float_zero_divide_excp(env, raddr);
    } else if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

static inline __attribute__((__always_inline__))
void float_check_status(CPUPPCState *env)
{
    /* GETPC() works here because this is inline */
    do_float_check_status(env, GETPC());
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}

/* fadd - fadd. */
uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN addition */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN subtraction */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN multiplication */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) &&
                 float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN division */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

#define FPU_FCTI(op, cvt, nanval) \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
{ \
    CPU_DoubleU farg; \
 \
    farg.ll = arg; \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status); \
 \
    if (unlikely(env->fp_status.float_exception_flags)) { \
        if (float64_is_any_nan(arg)) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
            if (float64_is_signaling_nan(arg, &env->fp_status)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
            } \
            farg.ll = nanval; \
        } else if (env->fp_status.float_exception_flags & \
                   float_flag_invalid) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
        } \
        float_check_status(env); \
    } \
    return farg.ll; \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
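
/*
 * Example expansion (editorial): FPU_FCTI(fctiw, int32, 0x80000000U)
 * defines helper_fctiw(), which converts a float64 to int32 through
 * float64_to_int32() and substitutes the nanval pattern 0x80000000 when
 * the source is a NaN, raising VXCVI (plus VXSNAN for a signaling NaN).
 */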

#define FPU_FCFI(op, cvtr, is_single) \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
{ \
    CPU_DoubleU farg; \
 \
    if (is_single) { \
        float32 tmp = cvtr(arg, &env->fp_status); \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else { \
        farg.d = cvtr(arg, &env->fp_status); \
    } \
    float_check_status(env); \
    return farg.ll; \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)

static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    float_check_status(env);
    return farg.ll;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
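
/*
 * Editorial note: the four fri* wrappers select the rounding attribute
 * encoded in the mnemonic: frin rounds to nearest with ties away from
 * zero, friz toward zero, frip toward +infinity, frim toward -infinity.
 */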

#define FPU_MADDSUB_UPDATE(NAME, TP) \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3, \
                 unsigned int madd_flags) \
{ \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) || \
        TP##_is_signaling_nan(arg2, &env->fp_status) || \
        TP##_is_signaling_nan(arg3, &env->fp_status)) { \
        /* sNaN operation */ \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
    } \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) || \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) { \
        /* Multiplication of zero by infinity */ \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); \
    } \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) && \
        TP##_is_infinity(arg3)) { \
        uint8_t aSign, bSign, cSign; \
 \
        aSign = TP##_is_neg(arg1); \
        bSign = TP##_is_neg(arg2); \
        cSign = TP##_is_neg(arg3); \
        if (madd_flags & float_muladd_negate_c) { \
            cSign ^= 1; \
        } \
        if (aSign ^ bSign ^ cSign) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); \
        } \
    } \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)

#define FPU_FMADD(op, madd_flags) \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \
                     uint64_t arg2, uint64_t arg3) \
{ \
    uint32_t flags; \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags, \
                                 &env->fp_status); \
    flags = get_float_exception_flags(&env->fp_status); \
    if (flags) { \
        if (flags & float_flag_invalid) { \
            float64_maddsub_update_excp(env, arg1, arg2, arg3, \
                                        madd_flags); \
        } \
        float_check_status(env); \
    } \
    return ret; \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
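
/*
 * Editorial note: the flag sets map directly onto softfloat's fused
 * multiply-add controls: fmsub negates the addend
 * (float_muladd_negate_c), fnmadd negates the final result, and fnmsub
 * negates both.
 */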

/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round to single precision */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN reciprocal square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }

    return farg.ll;
}

/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}

uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
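
/*
 * Editorial note: the caller stores this 4-bit value into a CR field:
 * 0x8 is always set, fg_flag maps to bit 2 and fe_flag to bit 1 (bit 0
 * is left clear here).
 */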

uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}

/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name) \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
    { \
        return e##name(env, val); \
    }

HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name) \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
    { \
        return ((uint64_t)e##name(env, val >> 32) << 32) | \
                (uint64_t)e##name(env, val); \
    }

HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name) \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    { \
        return e##name(env, op1, op2); \
    }

HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name) \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    { \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \
                (uint64_t)e##name(env, op1, op2); \
    }

HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name) \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    { \
        return e##name(env, op1, op2); \
    }

HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);

static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name) \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    { \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32), \
                           e##name(env, op1, op2)); \
    }

HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversions */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double-precision floating-point comparison helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}

#define float64_to_float64(x, env) x

/* VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
void helper_##name(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, xb; \
    int i; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
 \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat); \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
 \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
                       tp##_is_signaling_nan(xb.fld, &tstat)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
            } \
        } \
 \
        if (r2sp) { \
            xt.fld = helper_frsp(env, xt.fld); \
        } \
 \
        if (sfprf) { \
            helper_compute_fprf_float64(env, xt.fld); \
        } \
    } \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
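
/*
 * Editorial note on the instantiations: xsadddp is a scalar
 * double-precision add on VsrD(0) that updates FPRF; xsaddsp additionally
 * rounds the result to single precision through helper_frsp() (r2sp);
 * the xv* forms operate element-wise (two float64 or four float32
 * elements) without touching FPRF.
 */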

void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, xb; \
    int i; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
 \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat); \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
 \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) || \
                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf); \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
                       tp##_is_signaling_nan(xb.fld, &tstat)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
            } \
        } \
 \
        if (r2sp) { \
            xt.fld = helper_frsp(env, xt.fld); \
        } \
 \
        if (sfprf) { \
            helper_compute_fprf_float64(env, xt.fld); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)

void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
            (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }
    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, xb; \
    int i; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
 \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat); \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
 \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf); \
            } else if (tp##_is_zero(xa.fld) && \
                       tp##_is_zero(xb.fld)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf); \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
                       tp##_is_signaling_nan(xb.fld, &tstat)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
            } \
        } \
 \
        if (r2sp) { \
            xt.fld = helper_frsp(env, xt.fld); \
        } \
 \
        if (sfprf) { \
            helper_compute_fprf_float64(env, xt.fld); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)

void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
        } else if (float128_is_zero(xa.f128) &&
                   float128_is_zero(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

/* VSX_RE - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
 \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
        } \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status); \
 \
        if (r2sp) { \
            xt.fld = helper_frsp(env, xt.fld); \
        } \
 \
        if (sfprf) { \
            helper_compute_fprf_float64(env, xt.fld); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)

/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
 \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
 \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        xt.fld = tp##_sqrt(xb.fld, &tstat); \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
 \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
            } \
        } \
 \
        if (r2sp) { \
            xt.fld = helper_frsp(env, xt.fld); \
        } \
 \
        if (sfprf) { \
            helper_compute_fprf_float64(env, xt.fld); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xb; \
    int i; \
 \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
    helper_reset_fpstatus(env); \
 \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        xt.fld = tp##_sqrt(xb.fld, &tstat); \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat); \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
 \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
            } \
        } \
 \
        if (r2sp) { \
            xt.fld = helper_frsp(env, xt.fld); \
        } \
 \
        if (sfprf) { \
            helper_compute_fprf_float64(env, xt.fld); \
        } \
    } \
 \
    putVSR(xT(opcode), &xt, env); \
    float_check_status(env); \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)

/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xa, xb; \
    int i; \
    int fe_flag = 0; \
    int fg_flag = 0; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_infinity(xa.fld) || \
                     tp##_is_infinity(xb.fld) || \
                     tp##_is_zero(xb.fld))) { \
            fe_flag = 1; \
            fg_flag = 1; \
        } else { \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld); \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
 \
            if (unlikely(tp##_is_any_nan(xa.fld) || \
                         tp##_is_any_nan(xb.fld))) { \
                fe_flag = 1; \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) { \
                fe_flag = 1; \
            } else if (!tp##_is_zero(xa.fld) && \
                       (((e_a - e_b) >= emax) || \
                        ((e_a - e_b) <= (emin + 1)) || \
                        (e_a <= (emin + nbits)))) { \
                fe_flag = 1; \
            } \
 \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
                /* XB is not zero because of the above check and */ \
                /* so must be denormalized. */ \
                fg_flag = 1; \
            } \
        } \
    } \
 \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)

/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xa, xb; \
    int i; \
    int fe_flag = 0; \
    int fg_flag = 0; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_infinity(xb.fld) || \
                     tp##_is_zero(xb.fld))) { \
            fe_flag = 1; \
            fg_flag = 1; \
        } else { \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
 \
            if (unlikely(tp##_is_any_nan(xb.fld))) { \
                fe_flag = 1; \
            } else if (unlikely(tp##_is_zero(xb.fld))) { \
                fe_flag = 1; \
            } else if (unlikely(tp##_is_neg(xb.fld))) { \
                fe_flag = 1; \
            } else if (!tp##_is_zero(xb.fld) && \
                       (e_b <= (emin + nbits))) { \
                fe_flag = 1; \
            } \
 \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
                /* XB is not zero because of the above check and */ \
                /* therefore must be denormalized. */ \
                fg_flag = 1; \
            } \
        } \
    } \
 \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)

/* VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   afrm     - A form (1=A, 0=M)
 *   sfprf    - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt_in, xa, xb, xt_out; \
    ppc_vsr_t *b, *c; \
    int i; \
 \
    if (afrm) { /* AxB + T */ \
        b = &xb; \
        c = &xt_in; \
    } else { /* AxT + B */ \
        b = &xt_in; \
        c = &xb; \
    } \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt_in, env); \
 \
    xt_out = xt_in; \
 \
    helper_reset_fpstatus(env); \
 \
    for (i = 0; i < nels; i++) { \
        float_status tstat = env->fp_status; \
        set_float_exception_flags(0, &tstat); \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /* Avoid double rounding errors by rounding the intermediate */ \
            /* result to odd. */ \
            set_float_rounding_mode(float_round_to_zero, &tstat); \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
                                     maddflgs, &tstat); \
            xt_out.fld |= (get_float_exception_flags(&tstat) & \
                           float_flag_inexact) != 0; \
        } else { \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
                                     maddflgs, &tstat); \
        } \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
 \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
            tp##_maddsub_update_excp(env, xa.fld, b->fld, c->fld, maddflgs); \
        } \
 \
        if (r2sp) { \
            xt_out.fld = helper_frsp(env, xt_out.fld); \
        } \
 \
        if (sfprf) { \
            helper_compute_fprf_float64(env, xt_out.fld); \
        } \
    } \
    putVSR(xT(opcode), &xt_out, env); \
    float_check_status(env); \
}

VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
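
/*
 * Editorial note on the r2sp path above: rounding the 64-bit intermediate
 * toward zero and then OR-ing the inexact flag into the least-significant
 * bit implements round-to-odd, which makes the subsequent helper_frsp()
 * rounding to single precision immune to double-rounding errors.
 */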

/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - comparison operation
 *   exp   - expected result of comparison
 *   svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t xt, xa, xb; \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
 \
    getVSR(xA(opcode), &xa, env); \
    getVSR(xB(opcode), &xb, env); \
    getVSR(xT(opcode), &xt, env); \
 \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
        vxsnan_flag = true; \
        if (fpscr_ve == 0 && svxvc) { \
            vxvc_flag = true; \
        } \
    } else if (svxvc) { \
        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) || \
            float64_is_quiet_nan(xb.VsrD(0), &env->fp_status); \
    } \
    if (vxsnan_flag) { \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
    } \
    if (vxvc_flag) { \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
    } \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
 \
    if (!vex_flag) { \
        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) { \
            xt.VsrD(0) = -1; \
            xt.VsrD(1) = 0; \
        } else { \
            xt.VsrD(0) = 0; \
            xt.VsrD(1) = 0; \
        } \
    } \
    putVSR(xT(opcode), &xt, env); \
    helper_float_check_status(env); \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
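
/*
 * Editorial note: the comparisons are expressed through softfloat
 * primitives with the operands swapped: xscmpgedp tests
 * float64_le(xb, xa) == 1, i.e. xa >= xb, and xscmpnedp tests
 * float64_eq(xb, xa) == 0.
 */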

void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(xA(opcode), &xa, env);
    getVSR(xB(opcode), &xb, env);

    exp_a = extract64(xa.VsrD(0), 52, 11);
    exp_b = extract64(xb.VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
                 float64_is_any_nan(xb.VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}

void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);

    exp_a = extract64(xa.VsrD(0), 48, 15);
    exp_b = extract64(xb.VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa.f128) ||
                 float128_is_any_nan(xb.f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}
#define VSX_SCALAR_CMP(op, ordered)                                      \
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
{                                                                        \
    ppc_vsr_t xa, xb;                                                    \
    uint32_t cc = 0;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false;                         \
                                                                         \
    helper_reset_fpstatus(env);                                          \
    getVSR(xA(opcode), &xa, env);                                        \
    getVSR(xB(opcode), &xb, env);                                        \
                                                                         \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
        vxsnan_flag = true;                                              \
        cc = CRF_SO;                                                     \
        if (fpscr_ve == 0 && ordered) {                                  \
            vxvc_flag = true;                                            \
        }                                                                \
    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
        cc = CRF_SO;                                                     \
        if (ordered) {                                                   \
            vxvc_flag = true;                                            \
        }                                                                \
    }                                                                    \
    if (vxsnan_flag) {                                                   \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
    }                                                                    \
    if (vxvc_flag) {                                                     \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
    }                                                                    \
                                                                         \
    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
        cc |= CRF_LT;                                                    \
    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
        cc |= CRF_GT;                                                    \
    } else {                                                             \
        cc |= CRF_EQ;                                                    \
    }                                                                    \
                                                                         \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
                                                                         \
    float_check_status(env);                                             \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
#define VSX_SCALAR_CMPQ(op, ordered)                                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
{                                                                        \
    ppc_vsr_t xa, xb;                                                    \
    uint32_t cc = 0;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false;                         \
                                                                         \
    helper_reset_fpstatus(env);                                          \
    getVSR(rA(opcode) + 32, &xa, env);                                   \
    getVSR(rB(opcode) + 32, &xb, env);                                   \
                                                                         \
    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||           \
        float128_is_signaling_nan(xb.f128, &env->fp_status)) {           \
        vxsnan_flag = true;                                              \
        cc = CRF_SO;                                                     \
        if (fpscr_ve == 0 && ordered) {                                  \
            vxvc_flag = true;                                            \
        }                                                                \
    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||        \
               float128_is_quiet_nan(xb.f128, &env->fp_status)) {        \
        cc = CRF_SO;                                                     \
        if (ordered) {                                                   \
            vxvc_flag = true;                                            \
        }                                                                \
    }                                                                    \
    if (vxsnan_flag) {                                                   \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
    }                                                                    \
    if (vxvc_flag) {                                                     \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
    }                                                                    \
                                                                         \
    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {                \
        cc |= CRF_LT;                                                    \
    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {        \
        cc |= CRF_GT;                                                    \
    } else {                                                             \
        cc |= CRF_EQ;                                                    \
    }                                                                    \
                                                                         \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
                                                                         \
    float_check_status(env);                                             \
}

VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)
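/*
 * The ordered/unordered split mirrors the double-precision helpers
 * above: with ordered == 1 (xscmpoqp) a quiet NaN operand also raises
 * VXVC, while the unordered compare (xscmpuqp) only reports the
 * unordered outcome through CRF_SO.
 */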
/* VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    float_check_status(env);                                                  \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
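/*
 * maxnum/minnum follow IEEE 754-2008 maxNum/minNum semantics: e.g.
 * float64_maxnum(QNaN, 2.0) is 2.0, so a single quiet NaN operand does
 * not propagate into the result; only signaling NaNs raise VXSNAN in
 * the loop above.
 */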
#define VSX_MAX_MINC(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                        \
    getVSR(rD(opcode) + 32, &xt, env);                                        \
                                                                              \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
                 float64_is_any_nan(xb.VsrD(0)))) {                           \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    } else if ((max &&                                                        \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
               (!max &&                                                       \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else {                                                                  \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        putVSR(rD(opcode) + 32, &xt, env);                                    \
    }                                                                         \
}

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                        \
    getVSR(rD(opcode) + 32, &xt, env);                                        \
                                                                              \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                           \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                    \
        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) {  \
        if (max) {                                                            \
            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
                xt.VsrD(0) = 0ULL;                                            \
            } else {                                                          \
                xt.VsrD(0) = 0x8000000000000000ULL;                           \
            }                                                                 \
        } else {                                                              \
            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {   \
                xt.VsrD(0) = 0x8000000000000000ULL;                           \
            } else {                                                          \
                xt.VsrD(0) = 0ULL;                                            \
            }                                                                 \
        }                                                                     \
    } else if ((max &&                                                        \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
               (!max &&                                                       \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else {                                                                  \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        putVSR(rD(opcode) + 32, &xt, env);                                    \
    }                                                                         \
}

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
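/*
 * Unlike the maxnum/minnum helpers above, the "C" and "J" variants
 * never raise VXVC: a NaN operand simply selects an input (always xb
 * for the C forms, the NaN-carrying side for the J forms), and the J
 * forms additionally order the zeros so that, e.g.,
 * xsmaxjdp(+0.0, -0.0) is +0.0 and xsminjdp(+0.0, -0.0) is -0.0.
 */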
/* VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                         \
    ppc_vsr_t xt, xa, xb;                                                 \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    getVSR(xA(opcode), &xa, env);                                         \
    getVSR(xB(opcode), &xb, env);                                         \
    getVSR(xT(opcode), &xt, env);                                         \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
                     tp##_is_any_nan(xb.fld))) {                          \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
            }                                                             \
            xt.fld = 0;                                                   \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
                xt.fld = -1;                                              \
                all_false = 0;                                            \
            } else {                                                      \
                xt.fld = 0;                                               \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    putVSR(xT(opcode), &xt, env);                                         \
    if ((opcode >> (31 - 21)) & 1) {                                      \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
    }                                                                     \
    float_check_status(env);                                              \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
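/*
 * (opcode >> (31 - 21)) & 1 extracts instruction bit 21, the record
 * bit of the xvcmp* forms: e.g. for xvcmpeqdp. CR field 6 receives 0x8
 * when every element compared true and 0x2 when none did, matching the
 * all-ones/all-zeros masks written to xT.
 */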
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    getVSR(xT(opcode), &xt, env);                                  \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
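/*
 * The VsrW(2 * i) accessors place the narrower value in the even word
 * of each doubleword: e.g. xvcvdpsp stores the float32 result of
 * doubleword 0 in word 0 and of doubleword 1 in word 2, while
 * xvcvspdp reads its float32 sources from words 0 and 2.
 */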
/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                         \
    ppc_vsr_t xt, xb;                                                     \
    int i;                                                                \
                                                                          \
    getVSR(rB(opcode) + 32, &xb, env);                                    \
    getVSR(rD(opcode) + 32, &xt, env);                                    \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);               \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                      \
                                            &env->fp_status))) {          \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);        \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                        \
        }                                                                 \
        if (sfprf) {                                                      \
            helper_compute_fprf_##ttp(env, xt.tfld);                      \
        }                                                                 \
    }                                                                     \
                                                                          \
    putVSR(rD(opcode) + 32, &xt, env);                                    \
    float_check_status(env);                                              \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    memset(&xt, 0, sizeof(xt));                                    \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb.f128,
                                           &tstat))) {
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
    }
    helper_compute_fprf_float64(env, xt.VsrD(0));

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
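/*
 * The "n" (non-signalling) conversions above run on a scratch
 * float_status with the exception flags cleared and never write the
 * flags back, so they cannot disturb FPSCR; the single-precision image
 * travels in the upper word of the 64-bit value.
 */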
/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
            }                                                                \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
            xt.tfld = rnan;                                                  \
        } else {                                                             \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
                          &env->fp_status);                                  \
            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
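/*
 * rnan is the value written for NaN inputs: converting a NaN with
 * xscvdpsxds yields 0x8000000000000000ULL (the most negative int64),
 * while the unsigned conversions yield 0.  Out-of-range but non-NaN
 * inputs are handled by the round-to-zero conversion itself, with the
 * float_flag_invalid check raising VXCVI afterwards.
 */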
/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
        }                                                                    \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);                \
        xt.tfld = rnan;                                                      \
    } else {                                                                 \
        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
                      &env->fp_status);                                      \
        if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                  0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                  0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            xt.tfld = helper_frsp(env, xt.tfld);                        \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, xt.tfld);                  \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    float_check_status(env);                                            \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
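/*
 * For the *sp scalar forms, r2sp == 1 rounds the freshly converted
 * double to single precision through helper_frsp(): e.g. xscvsxdsp
 * first converts int64 -> float64 and then squeezes the result into
 * single-precision range while keeping it stored in double format.
 */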
/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
                                                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                  \
    getVSR(rD(opcode) + 32, &xt, env);                                  \
                                                                        \
    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                 \
    helper_compute_fprf_##ttp(env, xt.tfld);                            \
                                                                        \
    putVSR(xT(opcode) + 32, &xt, env);                                  \
    float_check_status(env);                                            \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
      float_round_up + float_round_to_zero)
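/*
 * With QEMU softfloat's enum values for the four modes summed here
 * (0, 1, 2 and 3 at the time of writing), FLOAT_ROUND_CURRENT comes
 * out as 6, which is not a value set_float_rounding_mode() would ever
 * receive for a real rounding mode.
 */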
/* VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
    getVSR(xB(opcode), &xb, env);                                      \
    getVSR(xT(opcode), &xt, env);                                      \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
                                           &env->fp_status))) {        \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
        } else {                                                       \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, xt.fld);                  \
        }                                                              \
    }                                                                  \
                                                                       \
    /* If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR */                                              \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        fpscr_set_rounding_mode(env);                                  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    putVSR(xT(opcode), &xt, env);                                      \
    float_check_status(env);                                           \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
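/*
 * Note that xsrdpi and friends round ties away from zero (e.g. 2.5
 * becomes 3.0), not to even: the Power ISA round-to-nearest for these
 * round-to-integer instructions maps to float_round_ties_away, while
 * the *c variants use FLOAT_ROUND_CURRENT, i.e. whatever FPSCR[RN]
 * currently selects.
 */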
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    float_check_status(env);
    return xt;
}
#define VSX_XXPERM(op, indexed)                                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
{                                                                     \
    ppc_vsr_t xt, xa, pcv, xto;                                       \
    int i, idx;                                                       \
                                                                      \
    getVSR(xA(opcode), &xa, env);                                     \
    getVSR(xT(opcode), &xt, env);                                     \
    getVSR(xB(opcode), &pcv, env);                                    \
                                                                      \
    for (i = 0; i < 16; i++) {                                        \
        idx = pcv.VsrB(i) & 0x1F;                                     \
        if (indexed) {                                                \
            idx = 31 - idx;                                           \
        }                                                             \
        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
    }                                                                 \
    putVSR(xT(opcode), &xto, env);                                    \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
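/*
 * Sketch of the permute: for xxperm a pcv byte of 0x03 selects byte 3
 * of xa and 0x13 (idx 19) selects byte 3 of xt; xxpermr (indexed == 1)
 * performs the same lookup with the index mirrored to 31 - idx.
 */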
void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    uint32_t exp, i, fraction;

    getVSR(xB(opcode), &xb, env);
    memset(&xt, 0, sizeof(xt));

    for (i = 0; i < 4; i++) {
        exp = (xb.VsrW(i) >> 23) & 0xFF;
        fraction = xb.VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            xt.VsrW(i) = fraction | 0x00800000;
        } else {
            xt.VsrW(i) = fraction;
        }
    }
    putVSR(xT(opcode), &xt, env);
}
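/*
 * Example: for 1.0f (0x3F800000) the loop computes exp == 0x7F and
 * fraction == 0, so the extracted significand is 0x00800000, i.e. the
 * implicit leading 1 made explicit; zeros, denormals and NaNs/infinities
 * (exp 0 or 255) keep only their explicit fraction bits.
 */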
/* VSX_TEST_DC - VSX floating point test data class
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   xbn   - VSR register number
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf  - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)               \
{                                                                 \
    ppc_vsr_t xt, xb;                                             \
    uint32_t i, sign, dcmx;                                       \
    uint32_t cc, match = 0;                                       \
                                                                  \
    getVSR(xbn, &xb, env);                                        \
    if (!scrf) {                                                  \
        memset(&xt, 0, sizeof(xt));                               \
        dcmx = DCMX_XV(opcode);                                   \
    } else {                                                      \
        dcmx = DCMX(opcode);                                      \
    }                                                             \
                                                                  \
    for (i = 0; i < nels; i++) {                                  \
        sign = tp##_is_neg(xb.fld);                               \
        if (tp##_is_any_nan(xb.fld)) {                            \
            match = extract32(dcmx, 6, 1);                        \
        } else if (tp##_is_infinity(xb.fld)) {                    \
            match = extract32(dcmx, 4 + !sign, 1);                \
        } else if (tp##_is_zero(xb.fld)) {                        \
            match = extract32(dcmx, 2 + !sign, 1);                \
        } else if (tp##_is_zero_or_denormal(xb.fld)) {            \
            match = extract32(dcmx, 0 + !sign, 1);                \
        }                                                         \
                                                                  \
        if (scrf) {                                               \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;        \
            env->fpscr &= ~(0x0F << FPSCR_FPRF);                  \
            env->fpscr |= cc << FPSCR_FPRF;                       \
            env->crf[BF(opcode)] = cc;                            \
        } else {                                                  \
            xt.tfld = match ? fld_max : 0;                        \
        }                                                         \
        match = 0;                                                \
    }                                                             \
    if (!scrf) {                                                  \
        putVSR(xT(opcode), &xt, env);                             \
    }                                                             \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
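/*
 * DCMX bit layout implied by the extract32() calls above, from bit 6
 * down to bit 0: NaN, +Infinity, -Infinity, +Zero, -Zero, +Denormal,
 * -Denormal.  For example, a negative infinity (sign == 1) matches
 * when dcmx bit 4 is set.
 */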
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    getVSR(xB(opcode), &xb, env);
    dcmx = DCMX(opcode);
    exp = (xb.VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb.VsrD(0));
    if (float64_is_any_nan(xb.VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb.VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb.VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb.VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb.VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}
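/*
 * not_sp is a round-trip test: the double is narrowed to float32 and
 * widened back, and any mismatch (e.g. 1e300, which overflows single
 * precision) flags the value as not representable in single precision
 * via the CRF_SO bit of cc.
 */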
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    xt.f128 = float128_round_to_int(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, xt.f128);
    float_check_status(env);
    putVSR(rD(opcode) + 32, &xt, env);
}
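/*
 * R/RMC decoding above: R == 0 with RMC == 0 selects round-to-nearest-
 * away, R == 0 with RMC == 3 defers to FPSCR[RN], and R == 1 picks one
 * of the four fixed modes directly.  With EX (the Rc field) == 0 the
 * inexact flag is discarded, so xsrqpi never sets XX while xsrqpix
 * does.
 */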
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb.f128, &tstat);
    xt.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sqrt(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            xt.f128 = float128_snan_to_qnan(xb.f128);
        } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
            xt.f128 = xb.f128;
        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
            set_snan_bit_is_one(0, &env->fp_status);
            xt.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
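/*
 * As in the other quad-precision helpers, Rc(opcode) != 0 selects the
 * "o" (round-to-odd) variant: xssubqpo computes the difference under
 * float_round_to_odd so that a subsequent rounding of the result to
 * double precision cannot suffer double rounding.
 */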